⚠️ VeridianOS Kernel Documentation - Low-level kernel code (no_std). This module defines only safe Rust; synchronization is via the module's reader-writer lock.

veridian_kernel/fs/
flock.rs

1//! POSIX File Locking (flock/fcntl)
2//!
3//! Implements both whole-file locking (flock semantics) and byte-range
4//! locking (fcntl/POSIX semantics). Per-inode lock tables track ownership
5//! by PID, with deadlock detection for blocking lock requests.
6//!
7//! - `flock()`: whole-file advisory locks (LOCK_SH, LOCK_EX, LOCK_UN)
8//! - `fcntl_setlk()`: byte-range locks (F_SETLK semantics, non-blocking)
9//! - `fcntl_setlkw()`: byte-range locks (F_SETLKW semantics, blocking with
10//!   deadlock detection)
11//! - `fcntl_getlk()`: query conflicting locks (F_GETLK)
12//! - `cleanup_process_locks()`: release all locks held by a process on exit
13
14#![allow(dead_code)]
15
16use alloc::{collections::BTreeMap, vec::Vec};
17
18#[cfg(not(target_arch = "aarch64"))]
19use spin::RwLock;
20
21#[cfg(target_arch = "aarch64")]
22use super::bare_lock::RwLock;
23use crate::error::KernelError;
24
// ---------------------------------------------------------------------------
// POSIX flock() constants
// ---------------------------------------------------------------------------
// NOTE(review): the numeric values appear to mirror the common Linux ABI
// (LOCK_SH=1, LOCK_EX=2, LOCK_NB=4, LOCK_UN=8) -- confirm against the
// userland headers before relying on binary compatibility.

/// Shared (read) lock
pub const LOCK_SH: u32 = 1;
/// Exclusive (write) lock
pub const LOCK_EX: u32 = 2;
/// Non-blocking flag (OR with LOCK_SH or LOCK_EX)
pub const LOCK_NB: u32 = 4;
/// Unlock
pub const LOCK_UN: u32 = 8;

// ---------------------------------------------------------------------------
// POSIX fcntl() lock type constants
// ---------------------------------------------------------------------------

/// Read (shared) lock
pub const F_RDLCK: u16 = 0;
/// Write (exclusive) lock
pub const F_WRLCK: u16 = 1;
/// Unlock
pub const F_UNLCK: u16 = 2;
48
49// ---------------------------------------------------------------------------
50// Data structures
51// ---------------------------------------------------------------------------
52
/// A POSIX byte-range lock (used by fcntl F_SETLK / F_GETLK).
///
/// The lock covers the half-open byte range `[start, start + len)`;
/// `len == 0` means the region extends to end-of-file (see
/// `FileLock::end`, which maps that case to `u64::MAX`).
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub struct FileLock {
    /// Lock type: F_RDLCK, F_WRLCK, or F_UNLCK.
    pub lock_type: u16,
    /// Starting byte offset of the locked region.
    pub start: u64,
    /// Length of the locked region (0 means "to end of file").
    pub len: u64,
    /// PID of the lock owner.
    pub pid: u64,
}
65
66impl FileLock {
67    /// Compute the exclusive end offset of this lock region.
68    /// A length of 0 means "to end of file", represented as u64::MAX.
69    fn end(&self) -> u64 {
70        if self.len == 0 {
71            u64::MAX
72        } else {
73            self.start.saturating_add(self.len)
74        }
75    }
76
77    /// Check whether two lock regions overlap.
78    fn overlaps(&self, other: &FileLock) -> bool {
79        self.start < other.end() && other.start < self.end()
80    }
81}
82
/// A whole-file advisory lock entry (flock semantics).
///
/// Unlike `FileLock`, this has no byte range: it always covers the entire
/// file identified by the inode key in the lock table.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub struct FlockEntry {
    /// Lock type: LOCK_SH or LOCK_EX.
    pub lock_type: u32,
    /// PID of the lock owner.
    pub pid: u64,
}
91
/// Per-system lock table tracking all file locks by inode.
///
/// The two maps are independent: flock and fcntl locks do not interact
/// with each other, matching POSIX advisory-lock semantics.
struct LockTable {
    /// Whole-file locks (flock): inode -> list of FlockEntry
    flock_locks: BTreeMap<u64, Vec<FlockEntry>>,
    /// Byte-range locks (fcntl): inode -> list of FileLock
    range_locks: BTreeMap<u64, Vec<FileLock>>,
}
99
impl LockTable {
    /// Create an empty lock table.
    ///
    /// `const` so it can initialize the static `LOCK_TABLE` without
    /// runtime setup (BTreeMap::new is const in `alloc`).
    const fn new() -> Self {
        Self {
            flock_locks: BTreeMap::new(),
            range_locks: BTreeMap::new(),
        }
    }
}
108
/// Global lock table, protected by a reader-writer lock.
///
/// Every public entry point in this module takes this lock (read for
/// F_GETLK queries, write for all mutations), so individual lock
/// operations are atomic with respect to one another.
static LOCK_TABLE: RwLock<LockTable> = RwLock::new(LockTable::new());
111
112// ---------------------------------------------------------------------------
113// flock() implementation
114// ---------------------------------------------------------------------------
115
116/// Apply a whole-file advisory lock (flock semantics).
117///
118/// `operation` is a combination of LOCK_SH/LOCK_EX/LOCK_UN and optionally
119/// LOCK_NB. Returns `WouldBlock` when LOCK_NB is set and the lock cannot
120/// be acquired immediately.
121pub fn flock(inode: u64, pid: u64, operation: u32) -> Result<(), KernelError> {
122    let op = operation & !LOCK_NB;
123    let non_blocking = (operation & LOCK_NB) != 0;
124
125    match op {
126        LOCK_UN => flock_unlock(inode, pid),
127        LOCK_SH => flock_lock(inode, pid, LOCK_SH, non_blocking),
128        LOCK_EX => flock_lock(inode, pid, LOCK_EX, non_blocking),
129        _ => Err(KernelError::InvalidArgument {
130            name: "operation",
131            value: "invalid flock operation",
132        }),
133    }
134}
135
136/// Acquire a whole-file lock (shared or exclusive).
137fn flock_lock(inode: u64, pid: u64, lock_type: u32, non_blocking: bool) -> Result<(), KernelError> {
138    let mut table = LOCK_TABLE.write();
139    let entries = table.flock_locks.entry(inode).or_default();
140
141    // Check for conflicts with existing locks.
142    for entry in entries.iter() {
143        // Same process can upgrade/downgrade its own lock.
144        if entry.pid == pid {
145            continue;
146        }
147        // Shared locks conflict only with exclusive requests.
148        // Exclusive locks conflict with any other lock.
149        let conflict = lock_type == LOCK_EX || entry.lock_type == LOCK_EX;
150        if conflict {
151            if non_blocking {
152                return Err(KernelError::WouldBlock);
153            }
154            // In a real kernel we would block here. For now, return WouldBlock
155            // since we have no wait-queue infrastructure wired for flock.
156            return Err(KernelError::WouldBlock);
157        }
158    }
159
160    // Remove any existing lock from this PID (upgrade/downgrade).
161    entries.retain(|e| e.pid != pid);
162
163    // Insert the new lock.
164    entries.push(FlockEntry { lock_type, pid });
165    Ok(())
166}
167
168/// Release a whole-file lock held by the given PID.
169fn flock_unlock(inode: u64, pid: u64) -> Result<(), KernelError> {
170    let mut table = LOCK_TABLE.write();
171    if let Some(entries) = table.flock_locks.get_mut(&inode) {
172        entries.retain(|e| e.pid != pid);
173        if entries.is_empty() {
174            table.flock_locks.remove(&inode);
175        }
176    }
177    Ok(())
178}
179
180// ---------------------------------------------------------------------------
181// fcntl() range lock implementation
182// ---------------------------------------------------------------------------
183
184/// Check whether a proposed range lock conflicts with any existing lock.
185///
186/// Returns `true` if there is a conflict (i.e., the new lock cannot be placed).
187fn check_range_conflict(existing: &[FileLock], new_lock: &FileLock) -> bool {
188    for lock in existing {
189        // Same owner can always re-lock.
190        if lock.pid == new_lock.pid {
191            continue;
192        }
193        // Locks must overlap to conflict.
194        if !lock.overlaps(new_lock) {
195            continue;
196        }
197        // Two read locks never conflict.
198        if lock.lock_type == F_RDLCK && new_lock.lock_type == F_RDLCK {
199            continue;
200        }
201        // At least one write lock and overlapping => conflict.
202        return true;
203    }
204    false
205}
206
207/// Find the first existing lock that conflicts with the proposed lock.
208///
209/// Returns `Some(conflicting_lock)` if a conflict exists, `None` otherwise.
210fn find_conflicting_lock(existing: &[FileLock], query: &FileLock) -> Option<FileLock> {
211    for lock in existing {
212        if lock.pid == query.pid {
213            continue;
214        }
215        if !lock.overlaps(query) {
216            continue;
217        }
218        if lock.lock_type == F_RDLCK && query.lock_type == F_RDLCK {
219            continue;
220        }
221        return Some(*lock);
222    }
223    None
224}
225
226/// Set a byte-range lock (F_SETLK semantics -- non-blocking).
227///
228/// If `lock.lock_type` is F_UNLCK, removes matching locks from the table.
229/// Otherwise, checks for conflicts and inserts the lock if none exist.
230pub fn fcntl_setlk(inode: u64, lock: &FileLock) -> Result<(), KernelError> {
231    if lock.lock_type == F_UNLCK {
232        return fcntl_unlock(inode, lock);
233    }
234
235    if lock.lock_type != F_RDLCK && lock.lock_type != F_WRLCK {
236        return Err(KernelError::InvalidArgument {
237            name: "lock_type",
238            value: "must be F_RDLCK, F_WRLCK, or F_UNLCK",
239        });
240    }
241
242    let mut table = LOCK_TABLE.write();
243    let locks = table.range_locks.entry(inode).or_default();
244
245    if check_range_conflict(locks, lock) {
246        return Err(KernelError::WouldBlock);
247    }
248
249    // Merge / replace: remove any existing locks from the same PID that
250    // overlap with the new lock, then insert the new one.
251    let pid = lock.pid;
252    let new_start = lock.start;
253    let new_end = lock.end();
254    locks.retain(|existing| {
255        if existing.pid != pid {
256            return true;
257        }
258        // Keep non-overlapping locks from the same owner.
259        !(existing.start < new_end && new_start < existing.end())
260    });
261
262    locks.push(*lock);
263    Ok(())
264}
265
266/// Set a byte-range lock with blocking (F_SETLKW semantics).
267///
268/// Performs simple cycle-based deadlock detection before blocking. If a
269/// deadlock is detected, returns a deadlock error. Since we lack a full
270/// wait-queue, this currently falls back to `WouldBlock` when the lock
271/// cannot be acquired and no deadlock is detected.
272pub fn fcntl_setlkw(inode: u64, lock: &FileLock) -> Result<(), KernelError> {
273    if lock.lock_type == F_UNLCK {
274        return fcntl_unlock(inode, lock);
275    }
276
277    if lock.lock_type != F_RDLCK && lock.lock_type != F_WRLCK {
278        return Err(KernelError::InvalidArgument {
279            name: "lock_type",
280            value: "must be F_RDLCK, F_WRLCK, or F_UNLCK",
281        });
282    }
283
284    let mut table = LOCK_TABLE.write();
285    let locks = table.range_locks.entry(inode).or_default();
286
287    if !check_range_conflict(locks, lock) {
288        // No conflict -- place the lock immediately.
289        let pid = lock.pid;
290        let new_start = lock.start;
291        let new_end = lock.end();
292        locks.retain(|existing| {
293            if existing.pid != pid {
294                return true;
295            }
296            !(existing.start < new_end && new_start < existing.end())
297        });
298        locks.push(*lock);
299        return Ok(());
300    }
301
302    // Conflict exists. Check for deadlocks by seeing if any of the holders
303    // we would wait on are themselves waiting on us (simple cycle detection
304    // across all inodes).
305    if detect_deadlock(&table, lock) {
306        return Err(KernelError::InvalidState {
307            expected: "no deadlock",
308            actual: "deadlock detected",
309        });
310    }
311
312    // In a real kernel we would sleep on a wait-queue. For now, return
313    // WouldBlock since no scheduler integration is wired for file locks.
314    Err(KernelError::WouldBlock)
315}
316
/// Simple deadlock detector.
///
/// Starting from `requester`, walks the "waits-for" graph: if process A
/// requests a lock that conflicts with a lock held by process B, then A
/// waits-for B. If following this chain leads back to A, a deadlock exists.
///
/// This is O(P * L) where P is the number of processes in the chain and L
/// is the total number of range locks, which is acceptable for the expected
/// lock counts in practice.
///
/// NOTE(review): `request` carries no inode, so the seeding pass below
/// scans the lock lists of *every* inode. A lock on an unrelated file
/// whose byte range happens to overlap the request can therefore seed the
/// walk, making the detector conservative (it may report a deadlock that
/// is not real) -- confirm this over-approximation is acceptable.
fn detect_deadlock(table: &LockTable, request: &FileLock) -> bool {
    let requester_pid = request.pid;
    // PIDs whose outgoing waits-for edges have already been expanded.
    let mut visited = Vec::new();
    // Work list of PIDs still to examine.
    let mut frontier = Vec::new();

    // Seed: find all PIDs that hold conflicting locks with our request.
    for locks in table.range_locks.values() {
        for lock in locks {
            if lock.pid == requester_pid {
                continue;
            }
            if !lock.overlaps(request) {
                continue;
            }
            if lock.lock_type == F_RDLCK && request.lock_type == F_RDLCK {
                continue;
            }
            if !frontier.contains(&lock.pid) {
                frontier.push(lock.pid);
            }
        }
    }

    // Graph walk (LIFO work list, so effectively depth-first): for each PID
    // in the frontier, check if any of the locks they are waiting for are
    // held by the original requester. Terminates because each PID enters
    // `visited` at most once and only unvisited PIDs are re-enqueued.
    while let Some(blocker_pid) = frontier.pop() {
        if blocker_pid == requester_pid {
            return true; // Cycle detected.
        }
        if visited.contains(&blocker_pid) {
            continue;
        }
        visited.push(blocker_pid);

        // Find all locks held by processes that would conflict with
        // any lock request from `blocker_pid`. We approximate: if
        // `blocker_pid` holds a write lock, any other write or read
        // lock holder on the same range is potentially blocking it
        // (and vice-versa). We look at all lock holders that conflict
        // with any lock `blocker_pid` holds.
        for locks in table.range_locks.values() {
            let blocker_locks: Vec<&FileLock> =
                locks.iter().filter(|l| l.pid == blocker_pid).collect();
            for bl in &blocker_locks {
                for other in locks {
                    if other.pid == blocker_pid || visited.contains(&other.pid) {
                        continue;
                    }
                    if !other.overlaps(bl) {
                        continue;
                    }
                    if other.lock_type == F_RDLCK && bl.lock_type == F_RDLCK {
                        continue;
                    }
                    if !frontier.contains(&other.pid) {
                        frontier.push(other.pid);
                    }
                }
            }
        }
    }

    false
}
390
391/// Unlock byte-range locks matching the given region for the owning PID.
392fn fcntl_unlock(inode: u64, lock: &FileLock) -> Result<(), KernelError> {
393    let mut table = LOCK_TABLE.write();
394    if let Some(locks) = table.range_locks.get_mut(&inode) {
395        let unlock_start = lock.start;
396        let unlock_end = lock.end();
397
398        locks.retain(|existing| {
399            if existing.pid != lock.pid {
400                return true;
401            }
402            // Remove locks that fall entirely within the unlock region.
403            !(existing.start >= unlock_start && existing.end() <= unlock_end)
404        });
405
406        if locks.is_empty() {
407            table.range_locks.remove(&inode);
408        }
409    }
410    Ok(())
411}
412
413/// Query for a conflicting lock (F_GETLK semantics).
414///
415/// If a lock exists that would conflict with the described lock, returns
416/// `Some(conflicting_lock)`. Otherwise returns `None`, indicating the
417/// lock could be placed.
418pub fn fcntl_getlk(inode: u64, lock: &FileLock) -> Result<Option<FileLock>, KernelError> {
419    if lock.lock_type != F_RDLCK && lock.lock_type != F_WRLCK {
420        return Err(KernelError::InvalidArgument {
421            name: "lock_type",
422            value: "must be F_RDLCK or F_WRLCK for F_GETLK",
423        });
424    }
425
426    let table = LOCK_TABLE.read();
427    let locks = match table.range_locks.get(&inode) {
428        Some(l) => l,
429        None => return Ok(None),
430    };
431
432    Ok(find_conflicting_lock(locks, lock))
433}
434
435// ---------------------------------------------------------------------------
436// Process cleanup
437// ---------------------------------------------------------------------------
438
439/// Remove all locks (flock and range) held by the specified PID.
440///
441/// Called during process exit to prevent leaked locks.
442pub fn cleanup_process_locks(pid: u64) {
443    let mut table = LOCK_TABLE.write();
444
445    // Clean up flock entries.
446    let empty_flock_inodes: Vec<u64> = table
447        .flock_locks
448        .iter_mut()
449        .filter_map(|(&inode, entries)| {
450            entries.retain(|e| e.pid != pid);
451            if entries.is_empty() {
452                Some(inode)
453            } else {
454                None
455            }
456        })
457        .collect();
458    for inode in empty_flock_inodes {
459        table.flock_locks.remove(&inode);
460    }
461
462    // Clean up range locks.
463    let empty_range_inodes: Vec<u64> = table
464        .range_locks
465        .iter_mut()
466        .filter_map(|(&inode, locks)| {
467            locks.retain(|l| l.pid != pid);
468            if locks.is_empty() {
469                Some(inode)
470            } else {
471                None
472            }
473        })
474        .collect();
475    for inode in empty_range_inodes {
476        table.range_locks.remove(&inode);
477    }
478}
479
480// ---------------------------------------------------------------------------
481// Tests
482// ---------------------------------------------------------------------------
483
#[cfg(test)]
mod tests {
    use core::sync::atomic::{AtomicU64, Ordering};

    use super::*;

    /// Atomic counter to generate unique inode IDs per test, avoiding
    /// cross-test interference when tests run in parallel on the shared
    /// global LOCK_TABLE.
    static NEXT_TEST_INODE: AtomicU64 = AtomicU64::new(0x1_0000_0000);

    /// Allocate `n` unique inode IDs for a single test.
    fn unique_inodes(n: usize) -> Vec<u64> {
        let base = NEXT_TEST_INODE.fetch_add(n as u64, Ordering::Relaxed);
        (0..n as u64).map(|i| base + i).collect()
    }

    // ---- flock tests ----

    // Two shared locks from different PIDs may coexist on one inode.
    #[test]
    fn test_flock_shared_compatible() {
        let ids = unique_inodes(1);
        assert!(flock(ids[0], 100, LOCK_SH).is_ok());
        assert!(flock(ids[0], 200, LOCK_SH).is_ok());
    }

    #[test]
    fn test_flock_exclusive_blocks_shared() {
        let ids = unique_inodes(1);
        assert!(flock(ids[0], 100, LOCK_EX).is_ok());
        assert_eq!(
            flock(ids[0], 200, LOCK_SH | LOCK_NB),
            Err(KernelError::WouldBlock)
        );
    }

    #[test]
    fn test_flock_exclusive_blocks_exclusive() {
        let ids = unique_inodes(1);
        assert!(flock(ids[0], 100, LOCK_EX).is_ok());
        assert_eq!(
            flock(ids[0], 200, LOCK_EX | LOCK_NB),
            Err(KernelError::WouldBlock)
        );
    }

    #[test]
    fn test_flock_shared_blocks_exclusive() {
        let ids = unique_inodes(1);
        assert!(flock(ids[0], 100, LOCK_SH).is_ok());
        assert_eq!(
            flock(ids[0], 200, LOCK_EX | LOCK_NB),
            Err(KernelError::WouldBlock)
        );
    }

    #[test]
    fn test_flock_same_pid_upgrade() {
        let ids = unique_inodes(1);
        assert!(flock(ids[0], 100, LOCK_SH).is_ok());
        // Same PID can upgrade to exclusive.
        assert!(flock(ids[0], 100, LOCK_EX).is_ok());
    }

    #[test]
    fn test_flock_unlock() {
        let ids = unique_inodes(1);
        assert!(flock(ids[0], 100, LOCK_EX).is_ok());
        assert!(flock(ids[0], 100, LOCK_UN).is_ok());
        // Now another process can lock.
        assert!(flock(ids[0], 200, LOCK_EX).is_ok());
    }

    // Locks are keyed by inode: an exclusive lock on one inode does not
    // affect a different inode.
    #[test]
    fn test_flock_different_inodes_independent() {
        let ids = unique_inodes(2);
        assert!(flock(ids[0], 100, LOCK_EX).is_ok());
        assert!(flock(ids[1], 200, LOCK_EX).is_ok());
    }

    // Operation 0 is none of LOCK_SH/LOCK_EX/LOCK_UN -> InvalidArgument.
    #[test]
    fn test_flock_invalid_operation() {
        let ids = unique_inodes(1);
        let result = flock(ids[0], 100, 0);
        assert!(result.is_err());
    }

    // ---- fcntl range lock tests ----

    #[test]
    fn test_fcntl_read_locks_compatible() {
        let ids = unique_inodes(1);
        let l1 = FileLock {
            lock_type: F_RDLCK,
            start: 0,
            len: 100,
            pid: 100,
        };
        let l2 = FileLock {
            lock_type: F_RDLCK,
            start: 50,
            len: 100,
            pid: 200,
        };
        assert!(fcntl_setlk(ids[0], &l1).is_ok());
        assert!(fcntl_setlk(ids[0], &l2).is_ok());
    }

    #[test]
    fn test_fcntl_write_conflicts_read() {
        let ids = unique_inodes(1);
        let l1 = FileLock {
            lock_type: F_RDLCK,
            start: 0,
            len: 100,
            pid: 100,
        };
        let l2 = FileLock {
            lock_type: F_WRLCK,
            start: 50,
            len: 100,
            pid: 200,
        };
        assert!(fcntl_setlk(ids[0], &l1).is_ok());
        assert_eq!(fcntl_setlk(ids[0], &l2), Err(KernelError::WouldBlock));
    }

    #[test]
    fn test_fcntl_write_conflicts_write() {
        let ids = unique_inodes(1);
        let l1 = FileLock {
            lock_type: F_WRLCK,
            start: 0,
            len: 100,
            pid: 100,
        };
        let l2 = FileLock {
            lock_type: F_WRLCK,
            start: 50,
            len: 50,
            pid: 200,
        };
        assert!(fcntl_setlk(ids[0], &l1).is_ok());
        assert_eq!(fcntl_setlk(ids[0], &l2), Err(KernelError::WouldBlock));
    }

    // Adjacent ranges ([0,100) and [100,200)) do not overlap, so two
    // write locks from different PIDs are both granted.
    #[test]
    fn test_fcntl_non_overlapping_no_conflict() {
        let ids = unique_inodes(1);
        let l1 = FileLock {
            lock_type: F_WRLCK,
            start: 0,
            len: 100,
            pid: 100,
        };
        let l2 = FileLock {
            lock_type: F_WRLCK,
            start: 100,
            len: 100,
            pid: 200,
        };
        assert!(fcntl_setlk(ids[0], &l1).is_ok());
        assert!(fcntl_setlk(ids[0], &l2).is_ok());
    }

    #[test]
    fn test_fcntl_same_pid_can_relock() {
        let ids = unique_inodes(1);
        let l1 = FileLock {
            lock_type: F_WRLCK,
            start: 0,
            len: 100,
            pid: 100,
        };
        let l2 = FileLock {
            lock_type: F_RDLCK,
            start: 0,
            len: 100,
            pid: 100,
        };
        assert!(fcntl_setlk(ids[0], &l1).is_ok());
        // Same PID can downgrade.
        assert!(fcntl_setlk(ids[0], &l2).is_ok());
    }

    #[test]
    fn test_fcntl_unlock_region() {
        let ids = unique_inodes(1);
        let l1 = FileLock {
            lock_type: F_WRLCK,
            start: 0,
            len: 100,
            pid: 100,
        };
        assert!(fcntl_setlk(ids[0], &l1).is_ok());
        let unlock = FileLock {
            lock_type: F_UNLCK,
            start: 0,
            len: 100,
            pid: 100,
        };
        assert!(fcntl_setlk(ids[0], &unlock).is_ok());
        // Now another process can lock.
        let l2 = FileLock {
            lock_type: F_WRLCK,
            start: 0,
            len: 100,
            pid: 200,
        };
        assert!(fcntl_setlk(ids[0], &l2).is_ok());
    }

    // len == 0 encodes "to end of file", so the lock covers every offset.
    #[test]
    fn test_fcntl_zero_len_means_eof() {
        let ids = unique_inodes(1);
        let l1 = FileLock {
            lock_type: F_WRLCK,
            start: 0,
            len: 0,
            pid: 100,
        };
        assert!(fcntl_setlk(ids[0], &l1).is_ok());
        // Any range should conflict.
        let l2 = FileLock {
            lock_type: F_WRLCK,
            start: 99999,
            len: 1,
            pid: 200,
        };
        assert_eq!(fcntl_setlk(ids[0], &l2), Err(KernelError::WouldBlock));
    }

    // ---- fcntl_getlk tests ----

    #[test]
    fn test_getlk_no_conflict() {
        let ids = unique_inodes(1);
        let query = FileLock {
            lock_type: F_WRLCK,
            start: 0,
            len: 100,
            pid: 100,
        };
        assert_eq!(fcntl_getlk(ids[0], &query).unwrap(), None);
    }

    #[test]
    fn test_getlk_returns_conflicting_lock() {
        let ids = unique_inodes(1);
        let l1 = FileLock {
            lock_type: F_WRLCK,
            start: 0,
            len: 100,
            pid: 100,
        };
        assert!(fcntl_setlk(ids[0], &l1).is_ok());
        let query = FileLock {
            lock_type: F_WRLCK,
            start: 50,
            len: 50,
            pid: 200,
        };
        let result = fcntl_getlk(ids[0], &query).unwrap();
        assert!(result.is_some());
        let conflict = result.unwrap();
        assert_eq!(conflict.pid, 100);
        assert_eq!(conflict.lock_type, F_WRLCK);
    }

    // F_UNLCK is not a valid query type for F_GETLK.
    #[test]
    fn test_getlk_invalid_type() {
        let ids = unique_inodes(1);
        let query = FileLock {
            lock_type: F_UNLCK,
            start: 0,
            len: 100,
            pid: 100,
        };
        assert!(fcntl_getlk(ids[0], &query).is_err());
    }

    // ---- setlkw + deadlock detection tests ----

    #[test]
    fn test_setlkw_no_conflict_succeeds() {
        let ids = unique_inodes(1);
        let l1 = FileLock {
            lock_type: F_WRLCK,
            start: 0,
            len: 100,
            pid: 100,
        };
        assert!(fcntl_setlkw(ids[0], &l1).is_ok());
    }

    #[test]
    fn test_setlkw_conflict_returns_would_block() {
        let ids = unique_inodes(1);
        let l1 = FileLock {
            lock_type: F_WRLCK,
            start: 0,
            len: 100,
            pid: 100,
        };
        assert!(fcntl_setlkw(ids[0], &l1).is_ok());
        let l2 = FileLock {
            lock_type: F_WRLCK,
            start: 0,
            len: 100,
            pid: 200,
        };
        // No deadlock, just a simple conflict -> WouldBlock.
        assert_eq!(fcntl_setlkw(ids[0], &l2), Err(KernelError::WouldBlock));
    }

    // NOTE(review): despite its name, this test exercises the *no-cycle*
    // path of the detector (only one direction of waiting exists, so no
    // deadlock should be reported). A true A-waits-B / B-waits-A cycle is
    // not covered -- consider adding that case.
    #[test]
    fn test_setlkw_deadlock_detection() {
        let ids = unique_inodes(2);
        // Use unique PIDs to avoid interference with other tests.
        let pid_a: u64 = 9000;
        let pid_b: u64 = 9001;

        // PID A holds [0..100) on inode ids[0].
        let la = FileLock {
            lock_type: F_WRLCK,
            start: 0,
            len: 100,
            pid: pid_a,
        };
        assert!(fcntl_setlk(ids[0], &la).is_ok());
        // PID B holds [0..100) on inode ids[1].
        let lb = FileLock {
            lock_type: F_WRLCK,
            start: 0,
            len: 100,
            pid: pid_b,
        };
        assert!(fcntl_setlk(ids[1], &lb).is_ok());

        // PID B requests a write lock on inode ids[0] [0..100) (held by PID A).
        // PID A does not hold anything on inode ids[1], so there is no deadlock cycle.
        let req = FileLock {
            lock_type: F_WRLCK,
            start: 0,
            len: 100,
            pid: pid_b,
        };
        // Should be WouldBlock, not deadlock.
        assert_eq!(fcntl_setlkw(ids[0], &req), Err(KernelError::WouldBlock));
    }

    // ---- cleanup tests ----

    #[test]
    fn test_cleanup_process_locks_flock() {
        let ids = unique_inodes(2);
        // Use unique PIDs.
        let pid_owner: u64 = 7000;
        let pid_other: u64 = 7001;
        assert!(flock(ids[0], pid_owner, LOCK_EX).is_ok());
        assert!(flock(ids[1], pid_owner, LOCK_SH).is_ok());
        cleanup_process_locks(pid_owner);
        // Both inodes should now be unlocked.
        assert!(flock(ids[0], pid_other, LOCK_EX).is_ok());
        assert!(flock(ids[1], pid_other, LOCK_EX).is_ok());
    }

    #[test]
    fn test_cleanup_process_locks_range() {
        let ids = unique_inodes(1);
        let pid_owner: u64 = 7100;
        let pid_other: u64 = 7101;
        let l1 = FileLock {
            lock_type: F_WRLCK,
            start: 0,
            len: 100,
            pid: pid_owner,
        };
        assert!(fcntl_setlk(ids[0], &l1).is_ok());
        cleanup_process_locks(pid_owner);
        let l2 = FileLock {
            lock_type: F_WRLCK,
            start: 0,
            len: 100,
            pid: pid_other,
        };
        assert!(fcntl_setlk(ids[0], &l2).is_ok());
    }

    #[test]
    fn test_cleanup_does_not_affect_other_pids() {
        let ids = unique_inodes(2);
        let pid_a: u64 = 7200;
        let pid_b: u64 = 7201;
        let pid_c: u64 = 7202;
        assert!(flock(ids[0], pid_a, LOCK_EX).is_ok());
        assert!(flock(ids[1], pid_b, LOCK_EX).is_ok());
        cleanup_process_locks(pid_a);
        // PID B's lock on ids[1] should still be in place.
        assert_eq!(
            flock(ids[1], pid_c, LOCK_EX | LOCK_NB),
            Err(KernelError::WouldBlock)
        );
    }

    // ---- FileLock overlap tests ----

    #[test]
    fn test_filelock_overlap_basic() {
        let a = FileLock {
            lock_type: F_WRLCK,
            start: 0,
            len: 100,
            pid: 1,
        };
        let b = FileLock {
            lock_type: F_WRLCK,
            start: 50,
            len: 100,
            pid: 2,
        };
        assert!(a.overlaps(&b));
        assert!(b.overlaps(&a));
    }

    // Ranges are half-open: [0,50) and [50,100) touch but do not overlap.
    #[test]
    fn test_filelock_no_overlap() {
        let a = FileLock {
            lock_type: F_WRLCK,
            start: 0,
            len: 50,
            pid: 1,
        };
        let b = FileLock {
            lock_type: F_WRLCK,
            start: 50,
            len: 50,
            pid: 2,
        };
        assert!(!a.overlaps(&b));
        assert!(!b.overlaps(&a));
    }

    #[test]
    fn test_filelock_zero_len_overlaps_everything() {
        let whole = FileLock {
            lock_type: F_WRLCK,
            start: 0,
            len: 0,
            pid: 1,
        };
        let small = FileLock {
            lock_type: F_WRLCK,
            start: 999999,
            len: 1,
            pid: 2,
        };
        assert!(whole.overlaps(&small));
        assert!(small.overlaps(&whole));
    }

    #[test]
    fn test_filelock_end() {
        let lock = FileLock {
            lock_type: F_WRLCK,
            start: 10,
            len: 20,
            pid: 1,
        };
        assert_eq!(lock.end(), 30);
        let whole = FileLock {
            lock_type: F_WRLCK,
            start: 0,
            len: 0,
            pid: 1,
        };
        assert_eq!(whole.end(), u64::MAX);
    }
}
962}