1#![allow(dead_code)]
15
16use alloc::{collections::BTreeMap, vec::Vec};
17
18#[cfg(not(target_arch = "aarch64"))]
19use spin::RwLock;
20
21#[cfg(target_arch = "aarch64")]
22use super::bare_lock::RwLock;
23use crate::error::KernelError;
24
/// `flock` operation: acquire a shared (read) lock.
pub const LOCK_SH: u32 = 1;
/// `flock` operation: acquire an exclusive (write) lock.
pub const LOCK_EX: u32 = 2;
/// `flock` modifier: fail with `WouldBlock` instead of blocking.
pub const LOCK_NB: u32 = 4;
/// `flock` operation: release a previously acquired lock.
pub const LOCK_UN: u32 = 8;

/// `fcntl` record-lock type: shared read lock.
pub const F_RDLCK: u16 = 0;
/// `fcntl` record-lock type: exclusive write lock.
pub const F_WRLCK: u16 = 1;
/// `fcntl` record-lock type: release locks in the given range.
pub const F_UNLCK: u16 = 2;
48
/// A POSIX-style byte-range (`fcntl`) lock record.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub struct FileLock {
    /// One of `F_RDLCK`, `F_WRLCK`, or `F_UNLCK`.
    pub lock_type: u16,
    /// First byte offset of the locked range.
    pub start: u64,
    /// Length of the range in bytes; `0` means "through end of file".
    pub len: u64,
    /// Process that owns (or is requesting) this lock.
    pub pid: u64,
}
65
66impl FileLock {
67 fn end(&self) -> u64 {
70 if self.len == 0 {
71 u64::MAX
72 } else {
73 self.start.saturating_add(self.len)
74 }
75 }
76
77 fn overlaps(&self, other: &FileLock) -> bool {
79 self.start < other.end() && other.start < self.end()
80 }
81}
82
/// A whole-file (`flock`-style) lock held on an inode.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub struct FlockEntry {
    /// `LOCK_SH` or `LOCK_EX`.
    pub lock_type: u32,
    /// Process that owns this lock.
    pub pid: u64,
}
91
/// All lock state for the system, keyed by inode number.
struct LockTable {
    /// Whole-file `flock` locks held per inode.
    flock_locks: BTreeMap<u64, Vec<FlockEntry>>,
    /// Byte-range `fcntl` locks held per inode.
    range_locks: BTreeMap<u64, Vec<FileLock>>,
}
99
100impl LockTable {
101 const fn new() -> Self {
102 Self {
103 flock_locks: BTreeMap::new(),
104 range_locks: BTreeMap::new(),
105 }
106 }
107}
108
/// Global table of all flock and fcntl range locks, keyed by inode.
static LOCK_TABLE: RwLock<LockTable> = RwLock::new(LockTable::new());
111
112pub fn flock(inode: u64, pid: u64, operation: u32) -> Result<(), KernelError> {
122 let op = operation & !LOCK_NB;
123 let non_blocking = (operation & LOCK_NB) != 0;
124
125 match op {
126 LOCK_UN => flock_unlock(inode, pid),
127 LOCK_SH => flock_lock(inode, pid, LOCK_SH, non_blocking),
128 LOCK_EX => flock_lock(inode, pid, LOCK_EX, non_blocking),
129 _ => Err(KernelError::InvalidArgument {
130 name: "operation",
131 value: "invalid flock operation",
132 }),
133 }
134}
135
136fn flock_lock(inode: u64, pid: u64, lock_type: u32, non_blocking: bool) -> Result<(), KernelError> {
138 let mut table = LOCK_TABLE.write();
139 let entries = table.flock_locks.entry(inode).or_default();
140
141 for entry in entries.iter() {
143 if entry.pid == pid {
145 continue;
146 }
147 let conflict = lock_type == LOCK_EX || entry.lock_type == LOCK_EX;
150 if conflict {
151 if non_blocking {
152 return Err(KernelError::WouldBlock);
153 }
154 return Err(KernelError::WouldBlock);
157 }
158 }
159
160 entries.retain(|e| e.pid != pid);
162
163 entries.push(FlockEntry { lock_type, pid });
165 Ok(())
166}
167
168fn flock_unlock(inode: u64, pid: u64) -> Result<(), KernelError> {
170 let mut table = LOCK_TABLE.write();
171 if let Some(entries) = table.flock_locks.get_mut(&inode) {
172 entries.retain(|e| e.pid != pid);
173 if entries.is_empty() {
174 table.flock_locks.remove(&inode);
175 }
176 }
177 Ok(())
178}
179
180fn check_range_conflict(existing: &[FileLock], new_lock: &FileLock) -> bool {
188 for lock in existing {
189 if lock.pid == new_lock.pid {
191 continue;
192 }
193 if !lock.overlaps(new_lock) {
195 continue;
196 }
197 if lock.lock_type == F_RDLCK && new_lock.lock_type == F_RDLCK {
199 continue;
200 }
201 return true;
203 }
204 false
205}
206
207fn find_conflicting_lock(existing: &[FileLock], query: &FileLock) -> Option<FileLock> {
211 for lock in existing {
212 if lock.pid == query.pid {
213 continue;
214 }
215 if !lock.overlaps(query) {
216 continue;
217 }
218 if lock.lock_type == F_RDLCK && query.lock_type == F_RDLCK {
219 continue;
220 }
221 return Some(*lock);
222 }
223 None
224}
225
226pub fn fcntl_setlk(inode: u64, lock: &FileLock) -> Result<(), KernelError> {
231 if lock.lock_type == F_UNLCK {
232 return fcntl_unlock(inode, lock);
233 }
234
235 if lock.lock_type != F_RDLCK && lock.lock_type != F_WRLCK {
236 return Err(KernelError::InvalidArgument {
237 name: "lock_type",
238 value: "must be F_RDLCK, F_WRLCK, or F_UNLCK",
239 });
240 }
241
242 let mut table = LOCK_TABLE.write();
243 let locks = table.range_locks.entry(inode).or_default();
244
245 if check_range_conflict(locks, lock) {
246 return Err(KernelError::WouldBlock);
247 }
248
249 let pid = lock.pid;
252 let new_start = lock.start;
253 let new_end = lock.end();
254 locks.retain(|existing| {
255 if existing.pid != pid {
256 return true;
257 }
258 !(existing.start < new_end && new_start < existing.end())
260 });
261
262 locks.push(*lock);
263 Ok(())
264}
265
/// Set a record lock, "wait" variant (`F_SETLKW` semantics).
///
/// Mirrors `fcntl_setlk`, but on conflict runs deadlock detection
/// before reporting. There is no wait queue here, so a conflict
/// ultimately surfaces as `WouldBlock` (the caller is expected to
/// retry) and a detected deadlock as `InvalidState`.
pub fn fcntl_setlkw(inode: u64, lock: &FileLock) -> Result<(), KernelError> {
    // F_UNLCK releases instead of acquiring.
    if lock.lock_type == F_UNLCK {
        return fcntl_unlock(inode, lock);
    }

    if lock.lock_type != F_RDLCK && lock.lock_type != F_WRLCK {
        return Err(KernelError::InvalidArgument {
            name: "lock_type",
            value: "must be F_RDLCK, F_WRLCK, or F_UNLCK",
        });
    }

    let mut table = LOCK_TABLE.write();
    let locks = table.range_locks.entry(inode).or_default();

    // Fast path: no conflict — replace the caller's own overlapping
    // locks with the new one (same behavior as fcntl_setlk).
    if !check_range_conflict(locks, lock) {
        let pid = lock.pid;
        let new_start = lock.start;
        let new_end = lock.end();
        locks.retain(|existing| {
            if existing.pid != pid {
                return true;
            }
            // Drop only this pid's locks that overlap the new range.
            !(existing.start < new_end && new_start < existing.end())
        });
        locks.push(*lock);
        return Ok(());
    }

    // Conflict: check whether waiting for the holder would deadlock.
    if detect_deadlock(&table, lock) {
        return Err(KernelError::InvalidState {
            expected: "no deadlock",
            actual: "deadlock detected",
        });
    }

    // No deadlock — the caller would have to block; report that.
    Err(KernelError::WouldBlock)
}
316
/// Heuristic deadlock check for a blocked `fcntl_setlkw` request.
///
/// Performs a graph search over lock conflicts: the initial frontier
/// is every pid holding a lock that conflicts with `request`; from
/// each visited pid it expands to other pids whose held locks
/// conflict with that pid's held locks. Reaching the requester again
/// is treated as a cycle and returns `true`.
///
/// NOTE(review): this approximates a waits-for graph using held-lock
/// *conflicts* only — the table does not record what each process is
/// actually waiting for, so this can over-report cycles. Confirm
/// against the intended blocking semantics.
fn detect_deadlock(table: &LockTable, request: &FileLock) -> bool {
    let requester_pid = request.pid;
    let mut visited = Vec::new();
    let mut frontier = Vec::new();

    // Seed the frontier with every process holding a lock that
    // conflicts with the request (overlapping, not both read locks).
    for locks in table.range_locks.values() {
        for lock in locks {
            if lock.pid == requester_pid {
                continue;
            }
            if !lock.overlaps(request) {
                continue;
            }
            if lock.lock_type == F_RDLCK && request.lock_type == F_RDLCK {
                continue;
            }
            if !frontier.contains(&lock.pid) {
                frontier.push(lock.pid);
            }
        }
    }

    // Depth-first expansion over conflicting pids.
    while let Some(blocker_pid) = frontier.pop() {
        // The search came back around to the requester: cycle.
        if blocker_pid == requester_pid {
            return true;
        }
        if visited.contains(&blocker_pid) {
            continue;
        }
        visited.push(blocker_pid);

        // Expand to pids whose locks conflict with any lock held by
        // `blocker_pid` anywhere in the table.
        for locks in table.range_locks.values() {
            let blocker_locks: Vec<&FileLock> =
                locks.iter().filter(|l| l.pid == blocker_pid).collect();
            for bl in &blocker_locks {
                for other in locks {
                    if other.pid == blocker_pid || visited.contains(&other.pid) {
                        continue;
                    }
                    if !other.overlaps(bl) {
                        continue;
                    }
                    if other.lock_type == F_RDLCK && bl.lock_type == F_RDLCK {
                        continue;
                    }
                    if !frontier.contains(&other.pid) {
                        frontier.push(other.pid);
                    }
                }
            }
        }
    }

    false
}
390
391fn fcntl_unlock(inode: u64, lock: &FileLock) -> Result<(), KernelError> {
393 let mut table = LOCK_TABLE.write();
394 if let Some(locks) = table.range_locks.get_mut(&inode) {
395 let unlock_start = lock.start;
396 let unlock_end = lock.end();
397
398 locks.retain(|existing| {
399 if existing.pid != lock.pid {
400 return true;
401 }
402 !(existing.start >= unlock_start && existing.end() <= unlock_end)
404 });
405
406 if locks.is_empty() {
407 table.range_locks.remove(&inode);
408 }
409 }
410 Ok(())
411}
412
413pub fn fcntl_getlk(inode: u64, lock: &FileLock) -> Result<Option<FileLock>, KernelError> {
419 if lock.lock_type != F_RDLCK && lock.lock_type != F_WRLCK {
420 return Err(KernelError::InvalidArgument {
421 name: "lock_type",
422 value: "must be F_RDLCK or F_WRLCK for F_GETLK",
423 });
424 }
425
426 let table = LOCK_TABLE.read();
427 let locks = match table.range_locks.get(&inode) {
428 Some(l) => l,
429 None => return Ok(None),
430 };
431
432 Ok(find_conflicting_lock(locks, lock))
433}
434
435pub fn cleanup_process_locks(pid: u64) {
443 let mut table = LOCK_TABLE.write();
444
445 let empty_flock_inodes: Vec<u64> = table
447 .flock_locks
448 .iter_mut()
449 .filter_map(|(&inode, entries)| {
450 entries.retain(|e| e.pid != pid);
451 if entries.is_empty() {
452 Some(inode)
453 } else {
454 None
455 }
456 })
457 .collect();
458 for inode in empty_flock_inodes {
459 table.flock_locks.remove(&inode);
460 }
461
462 let empty_range_inodes: Vec<u64> = table
464 .range_locks
465 .iter_mut()
466 .filter_map(|(&inode, locks)| {
467 locks.retain(|l| l.pid != pid);
468 if locks.is_empty() {
469 Some(inode)
470 } else {
471 None
472 }
473 })
474 .collect();
475 for inode in empty_range_inodes {
476 table.range_locks.remove(&inode);
477 }
478}
479
// Tests share one global LOCK_TABLE, so every test reserves fresh
// inode numbers via `unique_inodes` to stay independent of the others
// (and of test execution order).
#[cfg(test)]
mod tests {
    use core::sync::atomic::{AtomicU64, Ordering};

    use super::*;

    // Counter handing out inode numbers no other test has touched.
    static NEXT_TEST_INODE: AtomicU64 = AtomicU64::new(0x1_0000_0000);

    /// Reserve `n` fresh, consecutive inode numbers for one test.
    fn unique_inodes(n: usize) -> Vec<u64> {
        let base = NEXT_TEST_INODE.fetch_add(n as u64, Ordering::Relaxed);
        (0..n as u64).map(|i| base + i).collect()
    }

    // ---- flock (whole-file) semantics ----

    #[test]
    fn test_flock_shared_compatible() {
        let ids = unique_inodes(1);
        assert!(flock(ids[0], 100, LOCK_SH).is_ok());
        assert!(flock(ids[0], 200, LOCK_SH).is_ok());
    }

    #[test]
    fn test_flock_exclusive_blocks_shared() {
        let ids = unique_inodes(1);
        assert!(flock(ids[0], 100, LOCK_EX).is_ok());
        assert_eq!(
            flock(ids[0], 200, LOCK_SH | LOCK_NB),
            Err(KernelError::WouldBlock)
        );
    }

    #[test]
    fn test_flock_exclusive_blocks_exclusive() {
        let ids = unique_inodes(1);
        assert!(flock(ids[0], 100, LOCK_EX).is_ok());
        assert_eq!(
            flock(ids[0], 200, LOCK_EX | LOCK_NB),
            Err(KernelError::WouldBlock)
        );
    }

    #[test]
    fn test_flock_shared_blocks_exclusive() {
        let ids = unique_inodes(1);
        assert!(flock(ids[0], 100, LOCK_SH).is_ok());
        assert_eq!(
            flock(ids[0], 200, LOCK_EX | LOCK_NB),
            Err(KernelError::WouldBlock)
        );
    }

    // Re-locking by the same pid replaces the old lock (upgrade).
    #[test]
    fn test_flock_same_pid_upgrade() {
        let ids = unique_inodes(1);
        assert!(flock(ids[0], 100, LOCK_SH).is_ok());
        assert!(flock(ids[0], 100, LOCK_EX).is_ok());
    }

    #[test]
    fn test_flock_unlock() {
        let ids = unique_inodes(1);
        assert!(flock(ids[0], 100, LOCK_EX).is_ok());
        assert!(flock(ids[0], 100, LOCK_UN).is_ok());
        // After unlock, another pid can take the exclusive lock.
        assert!(flock(ids[0], 200, LOCK_EX).is_ok());
    }

    #[test]
    fn test_flock_different_inodes_independent() {
        let ids = unique_inodes(2);
        assert!(flock(ids[0], 100, LOCK_EX).is_ok());
        assert!(flock(ids[1], 200, LOCK_EX).is_ok());
    }

    // Operation 0 matches none of LOCK_SH/EX/UN -> InvalidArgument.
    #[test]
    fn test_flock_invalid_operation() {
        let ids = unique_inodes(1);
        let result = flock(ids[0], 100, 0);
        assert!(result.is_err());
    }

    // ---- fcntl byte-range lock semantics ----

    #[test]
    fn test_fcntl_read_locks_compatible() {
        let ids = unique_inodes(1);
        let l1 = FileLock {
            lock_type: F_RDLCK,
            start: 0,
            len: 100,
            pid: 100,
        };
        let l2 = FileLock {
            lock_type: F_RDLCK,
            start: 50,
            len: 100,
            pid: 200,
        };
        assert!(fcntl_setlk(ids[0], &l1).is_ok());
        assert!(fcntl_setlk(ids[0], &l2).is_ok());
    }

    #[test]
    fn test_fcntl_write_conflicts_read() {
        let ids = unique_inodes(1);
        let l1 = FileLock {
            lock_type: F_RDLCK,
            start: 0,
            len: 100,
            pid: 100,
        };
        let l2 = FileLock {
            lock_type: F_WRLCK,
            start: 50,
            len: 100,
            pid: 200,
        };
        assert!(fcntl_setlk(ids[0], &l1).is_ok());
        assert_eq!(fcntl_setlk(ids[0], &l2), Err(KernelError::WouldBlock));
    }

    #[test]
    fn test_fcntl_write_conflicts_write() {
        let ids = unique_inodes(1);
        let l1 = FileLock {
            lock_type: F_WRLCK,
            start: 0,
            len: 100,
            pid: 100,
        };
        let l2 = FileLock {
            lock_type: F_WRLCK,
            start: 50,
            len: 50,
            pid: 200,
        };
        assert!(fcntl_setlk(ids[0], &l1).is_ok());
        assert_eq!(fcntl_setlk(ids[0], &l2), Err(KernelError::WouldBlock));
    }

    // Ranges [0,100) and [100,200) touch but do not overlap.
    #[test]
    fn test_fcntl_non_overlapping_no_conflict() {
        let ids = unique_inodes(1);
        let l1 = FileLock {
            lock_type: F_WRLCK,
            start: 0,
            len: 100,
            pid: 100,
        };
        let l2 = FileLock {
            lock_type: F_WRLCK,
            start: 100,
            len: 100,
            pid: 200,
        };
        assert!(fcntl_setlk(ids[0], &l1).is_ok());
        assert!(fcntl_setlk(ids[0], &l2).is_ok());
    }

    // A pid's own overlapping lock is replaced, never a conflict.
    #[test]
    fn test_fcntl_same_pid_can_relock() {
        let ids = unique_inodes(1);
        let l1 = FileLock {
            lock_type: F_WRLCK,
            start: 0,
            len: 100,
            pid: 100,
        };
        let l2 = FileLock {
            lock_type: F_RDLCK,
            start: 0,
            len: 100,
            pid: 100,
        };
        assert!(fcntl_setlk(ids[0], &l1).is_ok());
        assert!(fcntl_setlk(ids[0], &l2).is_ok());
    }

    #[test]
    fn test_fcntl_unlock_region() {
        let ids = unique_inodes(1);
        let l1 = FileLock {
            lock_type: F_WRLCK,
            start: 0,
            len: 100,
            pid: 100,
        };
        assert!(fcntl_setlk(ids[0], &l1).is_ok());
        let unlock = FileLock {
            lock_type: F_UNLCK,
            start: 0,
            len: 100,
            pid: 100,
        };
        assert!(fcntl_setlk(ids[0], &unlock).is_ok());
        // The freed range is now available to another pid.
        let l2 = FileLock {
            lock_type: F_WRLCK,
            start: 0,
            len: 100,
            pid: 200,
        };
        assert!(fcntl_setlk(ids[0], &l2).is_ok());
    }

    // len == 0 locks from start through end-of-file.
    #[test]
    fn test_fcntl_zero_len_means_eof() {
        let ids = unique_inodes(1);
        let l1 = FileLock {
            lock_type: F_WRLCK,
            start: 0,
            len: 0,
            pid: 100,
        };
        assert!(fcntl_setlk(ids[0], &l1).is_ok());
        let l2 = FileLock {
            lock_type: F_WRLCK,
            start: 99999,
            len: 1,
            pid: 200,
        };
        assert_eq!(fcntl_setlk(ids[0], &l2), Err(KernelError::WouldBlock));
    }

    // ---- F_GETLK ----

    #[test]
    fn test_getlk_no_conflict() {
        let ids = unique_inodes(1);
        let query = FileLock {
            lock_type: F_WRLCK,
            start: 0,
            len: 100,
            pid: 100,
        };
        assert_eq!(fcntl_getlk(ids[0], &query).unwrap(), None);
    }

    #[test]
    fn test_getlk_returns_conflicting_lock() {
        let ids = unique_inodes(1);
        let l1 = FileLock {
            lock_type: F_WRLCK,
            start: 0,
            len: 100,
            pid: 100,
        };
        assert!(fcntl_setlk(ids[0], &l1).is_ok());
        let query = FileLock {
            lock_type: F_WRLCK,
            start: 50,
            len: 50,
            pid: 200,
        };
        let result = fcntl_getlk(ids[0], &query).unwrap();
        assert!(result.is_some());
        let conflict = result.unwrap();
        assert_eq!(conflict.pid, 100);
        assert_eq!(conflict.lock_type, F_WRLCK);
    }

    #[test]
    fn test_getlk_invalid_type() {
        let ids = unique_inodes(1);
        let query = FileLock {
            lock_type: F_UNLCK,
            start: 0,
            len: 100,
            pid: 100,
        };
        assert!(fcntl_getlk(ids[0], &query).is_err());
    }

    // ---- F_SETLKW ----

    #[test]
    fn test_setlkw_no_conflict_succeeds() {
        let ids = unique_inodes(1);
        let l1 = FileLock {
            lock_type: F_WRLCK,
            start: 0,
            len: 100,
            pid: 100,
        };
        assert!(fcntl_setlkw(ids[0], &l1).is_ok());
    }

    #[test]
    fn test_setlkw_conflict_returns_would_block() {
        let ids = unique_inodes(1);
        let l1 = FileLock {
            lock_type: F_WRLCK,
            start: 0,
            len: 100,
            pid: 100,
        };
        assert!(fcntl_setlkw(ids[0], &l1).is_ok());
        let l2 = FileLock {
            lock_type: F_WRLCK,
            start: 0,
            len: 100,
            pid: 200,
        };
        assert_eq!(fcntl_setlkw(ids[0], &l2), Err(KernelError::WouldBlock));
    }

    // NOTE(review): despite the name, this scenario is not a cycle —
    // pid_a only *holds* a lock and is not recorded as waiting, so
    // detect_deadlock finds nothing and plain WouldBlock is expected.
    #[test]
    fn test_setlkw_deadlock_detection() {
        let ids = unique_inodes(2);
        let pid_a: u64 = 9000;
        let pid_b: u64 = 9001;

        let la = FileLock {
            lock_type: F_WRLCK,
            start: 0,
            len: 100,
            pid: pid_a,
        };
        assert!(fcntl_setlk(ids[0], &la).is_ok());
        let lb = FileLock {
            lock_type: F_WRLCK,
            start: 0,
            len: 100,
            pid: pid_b,
        };
        assert!(fcntl_setlk(ids[1], &lb).is_ok());

        let req = FileLock {
            lock_type: F_WRLCK,
            start: 0,
            len: 100,
            pid: pid_b,
        };
        assert_eq!(fcntl_setlkw(ids[0], &req), Err(KernelError::WouldBlock));
    }

    // ---- process-exit cleanup ----

    #[test]
    fn test_cleanup_process_locks_flock() {
        let ids = unique_inodes(2);
        let pid_owner: u64 = 7000;
        let pid_other: u64 = 7001;
        assert!(flock(ids[0], pid_owner, LOCK_EX).is_ok());
        assert!(flock(ids[1], pid_owner, LOCK_SH).is_ok());
        cleanup_process_locks(pid_owner);
        // Both inodes are free again after cleanup.
        assert!(flock(ids[0], pid_other, LOCK_EX).is_ok());
        assert!(flock(ids[1], pid_other, LOCK_EX).is_ok());
    }

    #[test]
    fn test_cleanup_process_locks_range() {
        let ids = unique_inodes(1);
        let pid_owner: u64 = 7100;
        let pid_other: u64 = 7101;
        let l1 = FileLock {
            lock_type: F_WRLCK,
            start: 0,
            len: 100,
            pid: pid_owner,
        };
        assert!(fcntl_setlk(ids[0], &l1).is_ok());
        cleanup_process_locks(pid_owner);
        let l2 = FileLock {
            lock_type: F_WRLCK,
            start: 0,
            len: 100,
            pid: pid_other,
        };
        assert!(fcntl_setlk(ids[0], &l2).is_ok());
    }

    #[test]
    fn test_cleanup_does_not_affect_other_pids() {
        let ids = unique_inodes(2);
        let pid_a: u64 = 7200;
        let pid_b: u64 = 7201;
        let pid_c: u64 = 7202;
        assert!(flock(ids[0], pid_a, LOCK_EX).is_ok());
        assert!(flock(ids[1], pid_b, LOCK_EX).is_ok());
        cleanup_process_locks(pid_a);
        // pid_b's lock on ids[1] must survive pid_a's cleanup.
        assert_eq!(
            flock(ids[1], pid_c, LOCK_EX | LOCK_NB),
            Err(KernelError::WouldBlock)
        );
    }

    // ---- FileLock range helpers ----

    #[test]
    fn test_filelock_overlap_basic() {
        let a = FileLock {
            lock_type: F_WRLCK,
            start: 0,
            len: 100,
            pid: 1,
        };
        let b = FileLock {
            lock_type: F_WRLCK,
            start: 50,
            len: 100,
            pid: 2,
        };
        assert!(a.overlaps(&b));
        assert!(b.overlaps(&a));
    }

    #[test]
    fn test_filelock_no_overlap() {
        let a = FileLock {
            lock_type: F_WRLCK,
            start: 0,
            len: 50,
            pid: 1,
        };
        let b = FileLock {
            lock_type: F_WRLCK,
            start: 50,
            len: 50,
            pid: 2,
        };
        assert!(!a.overlaps(&b));
        assert!(!b.overlaps(&a));
    }

    #[test]
    fn test_filelock_zero_len_overlaps_everything() {
        let whole = FileLock {
            lock_type: F_WRLCK,
            start: 0,
            len: 0,
            pid: 1,
        };
        let small = FileLock {
            lock_type: F_WRLCK,
            start: 999999,
            len: 1,
            pid: 2,
        };
        assert!(whole.overlaps(&small));
        assert!(small.overlaps(&whole));
    }

    #[test]
    fn test_filelock_end() {
        let lock = FileLock {
            lock_type: F_WRLCK,
            start: 10,
            len: 20,
            pid: 1,
        };
        assert_eq!(lock.end(), 30);
        let whole = FileLock {
            lock_type: F_WRLCK,
            start: 0,
            len: 0,
            pid: 1,
        };
        assert_eq!(whole.end(), u64::MAX);
    }
}