1use alloc::{string::String, sync::Arc, vec::Vec};
4use core::sync::atomic::AtomicBool;
5
6#[cfg(not(target_arch = "aarch64"))]
7use spin::RwLock;
8
9#[cfg(target_arch = "aarch64")]
10use super::bare_lock::RwLock;
11use super::VfsNode;
12use crate::error::{FsError, KernelError};
13
/// Index into a process's file table; same role as a POSIX file descriptor.
pub type FileDescriptor = usize;

/// Conventional descriptor for standard input.
pub const STDIN: FileDescriptor = 0;
/// Conventional descriptor for standard output.
pub const STDOUT: FileDescriptor = 1;
/// Conventional descriptor for standard error.
pub const STDERR: FileDescriptor = 2;
21
/// Decoded open(2)-style flags describing how a file was opened.
#[derive(Debug, Clone, Copy)]
pub struct OpenFlags {
    /// File may be read.
    pub read: bool,
    /// File may be written.
    pub write: bool,
    /// Writes always go to the end of the file (O_APPEND).
    pub append: bool,
    /// Create the file if it does not exist (O_CREAT).
    pub create: bool,
    /// Truncate the file to zero length on open (O_TRUNC).
    pub truncate: bool,
    /// Fail if the file already exists (O_EXCL).
    pub exclusive: bool,
    /// Open in non-blocking mode (O_NONBLOCK).
    pub nonblock: bool,
}

impl OpenFlags {
    /// Flags for a plain read-only open.
    pub fn read_only() -> Self {
        Self {
            read: true,
            write: false,
            append: false,
            create: false,
            truncate: false,
            exclusive: false,
            nonblock: false,
        }
    }

    /// Flags for a write-only open that creates and truncates the file,
    /// mirroring the common `O_WRONLY | O_CREAT | O_TRUNC` combination.
    pub fn write_only() -> Self {
        Self {
            read: false,
            write: true,
            append: false,
            create: true,
            truncate: true,
            exclusive: false,
            nonblock: false,
        }
    }

    /// Flags for a read-write open that creates the file if missing
    /// (without truncating it).
    pub fn read_write() -> Self {
        Self {
            read: true,
            write: true,
            append: false,
            create: true,
            truncate: false,
            exclusive: false,
            nonblock: false,
        }
    }

    /// Flags for an append-mode open that creates the file if missing.
    pub fn append() -> Self {
        Self {
            read: false,
            write: true,
            append: true,
            create: true,
            truncate: false,
            exclusive: false,
            nonblock: false,
        }
    }

    /// Decodes raw open(2) flag bits (Linux-style values).
    ///
    /// Returns `None` when the access-mode bits are invalid — i.e. both
    /// `O_WRONLY` and `O_RDWR` set at once (`bits & O_ACCMODE == 3`), which
    /// POSIX defines as an error. Previously this function accepted every
    /// input and silently treated the invalid mode as read-only, making the
    /// `Option` return type meaningless.
    pub fn from_bits(bits: u32) -> Option<Self> {
        const O_WRONLY: u32 = 0x0001;
        const O_RDWR: u32 = 0x0002;
        const O_ACCMODE: u32 = 0x0003;
        const O_CREAT: u32 = 0x0040;
        const O_EXCL: u32 = 0x0080;
        const O_TRUNC: u32 = 0x0200;
        const O_APPEND: u32 = 0x0400;
        const O_NONBLOCK: u32 = 0x0800;

        let access_mode = bits & O_ACCMODE;

        // O_WRONLY | O_RDWR together is not a valid access mode.
        if access_mode == O_ACCMODE {
            return None;
        }

        Some(Self {
            // O_RDONLY (0) and O_RDWR are readable; O_WRONLY is not.
            read: access_mode != O_WRONLY,
            write: access_mode == O_WRONLY || access_mode == O_RDWR,
            append: (bits & O_APPEND) != 0,
            create: (bits & O_CREAT) != 0,
            truncate: (bits & O_TRUNC) != 0,
            exclusive: (bits & O_EXCL) != 0,
            nonblock: (bits & O_NONBLOCK) != 0,
        })
    }
}
116
/// Origin for [`File::seek`], mirroring `std::io::SeekFrom`.
#[derive(Debug, Clone, Copy)]
pub enum SeekFrom {
    /// Absolute byte offset from the beginning of the file.
    Start(usize),
    /// Signed byte offset relative to the current position.
    Current(isize),
    /// Signed byte offset relative to the end of the file.
    End(isize),
}
124
/// An open file: a VFS node plus per-open state (flags, cursor, refcount).
pub struct File {
    /// Underlying filesystem node this open file reads from and writes to.
    pub node: Arc<dyn VfsNode>,

    /// Access flags the file was opened with.
    pub flags: OpenFlags,

    /// Non-blocking mode flag, seeded from `flags.nonblock`. Stored
    /// atomically so it can be flipped after open without locking
    /// (NOTE(review): the code that toggles it is not in this file — confirm).
    pub nonblock: AtomicBool,

    /// Current byte offset used by sequential `read`/`write`.
    pub position: RwLock<usize>,

    /// Number of file-table slots referring to this open file (dup et al.).
    pub refcount: RwLock<usize>,

    /// Path the file was opened under, when the caller recorded one.
    pub path: Option<String>,
}
148
149impl File {
150 pub fn new(node: Arc<dyn VfsNode>, flags: OpenFlags) -> Self {
152 let nb = flags.nonblock;
153 Self {
154 node,
155 flags,
156 nonblock: AtomicBool::new(nb),
157 position: RwLock::new(0),
158 refcount: RwLock::new(1),
159 path: None,
160 }
161 }
162
163 pub fn new_with_path(node: Arc<dyn VfsNode>, flags: OpenFlags, path: String) -> Self {
165 let nb = flags.nonblock;
166 Self {
167 node,
168 flags,
169 nonblock: AtomicBool::new(nb),
170 position: RwLock::new(0),
171 refcount: RwLock::new(1),
172 path: Some(path),
173 }
174 }
175
176 pub fn read(&self, buffer: &mut [u8]) -> Result<usize, KernelError> {
178 if !self.flags.read {
179 return Err(KernelError::PermissionDenied {
180 operation: "read file not opened for reading",
181 });
182 }
183
184 let mut pos = self.position.write();
185 let bytes_read = self.node.read(*pos, buffer)?;
186 *pos += bytes_read;
187 Ok(bytes_read)
188 }
189
190 pub fn write(&self, data: &[u8]) -> Result<usize, KernelError> {
192 if !self.flags.write {
193 return Err(KernelError::PermissionDenied {
194 operation: "write file not opened for writing",
195 });
196 }
197
198 let mut pos = self.position.write();
199
200 if self.flags.append {
201 let metadata = self.node.metadata()?;
203 *pos = metadata.size;
204 }
205
206 let bytes_written = self.node.write(*pos, data)?;
207 *pos += bytes_written;
208 Ok(bytes_written)
209 }
210
211 pub fn seek(&self, from: SeekFrom) -> Result<usize, KernelError> {
213 let mut pos = self.position.write();
214
215 let new_pos = match from {
216 SeekFrom::Start(offset) => offset,
217 SeekFrom::Current(offset) => {
218 if offset < 0 {
219 pos.checked_sub((-offset) as usize)
220 .ok_or(KernelError::InvalidArgument {
221 name: "offset",
222 value: "seek before start of file",
223 })?
224 } else {
225 pos.checked_add(offset as usize)
226 .ok_or(KernelError::InvalidArgument {
227 name: "offset",
228 value: "seek overflow",
229 })?
230 }
231 }
232 SeekFrom::End(offset) => {
233 let metadata = self.node.metadata()?;
234 if offset < 0 {
235 metadata.size.checked_sub((-offset) as usize).ok_or(
236 KernelError::InvalidArgument {
237 name: "offset",
238 value: "seek before start of file",
239 },
240 )?
241 } else {
242 metadata.size.checked_add(offset as usize).ok_or(
243 KernelError::InvalidArgument {
244 name: "offset",
245 value: "seek overflow",
246 },
247 )?
248 }
249 }
250 };
251
252 *pos = new_pos;
253 Ok(new_pos)
254 }
255
256 pub fn tell(&self) -> usize {
258 *self.position.read()
259 }
260
261 pub fn inc_ref(&self) {
263 *self.refcount.write() += 1;
264 }
265
266 pub fn dec_ref(&self) -> usize {
268 let mut count = self.refcount.write();
269 *count = count.saturating_sub(1);
270 *count
271 }
272}
273
/// A single slot in a [`FileTable`]: the open file plus per-descriptor state.
pub struct FileEntry {
    /// Shared handle to the open file; several descriptors may point at one.
    pub file: Arc<File>,
    /// Close-on-exec flag (`FD_CLOEXEC`): drop this descriptor across exec.
    pub cloexec: bool,
}
281
/// Per-process table mapping file descriptors to open files.
pub struct FileTable {
    /// Slot `i` holds the entry for descriptor `i`; `None` marks a free slot.
    files: RwLock<Vec<Option<FileEntry>>>,

    /// Next descriptor to hand out when appending past the end of the table.
    next_fd: RwLock<FileDescriptor>,
}
290
291impl FileTable {
292 pub fn new() -> Self {
294 let mut files = Vec::with_capacity(256);
295
296 files.push(None); files.push(None); files.push(None); Self {
302 files: RwLock::new(files),
303 next_fd: RwLock::new(3),
304 }
305 }
306}
307
impl Default for FileTable {
    /// Equivalent to [`FileTable::new`].
    fn default() -> Self {
        Self::new()
    }
}
313
314impl FileTable {
315 pub fn open(&self, file: Arc<File>) -> Result<FileDescriptor, KernelError> {
317 self.open_with_flags(file, false)
318 }
319
320 pub fn open_with_flags(
322 &self,
323 file: Arc<File>,
324 cloexec: bool,
325 ) -> Result<FileDescriptor, KernelError> {
326 let mut files = self.files.write();
327 let mut next_fd = self.next_fd.write();
328
329 let entry = FileEntry { file, cloexec };
330
331 for (fd, slot) in files.iter_mut().enumerate() {
333 if slot.is_none() {
334 *slot = Some(entry);
335 return Ok(fd);
336 }
337 }
338
339 let fd = *next_fd;
341 if fd >= 1024 {
342 return Err(KernelError::FsError(FsError::TooManyOpenFiles));
343 }
344
345 files.push(Some(entry));
346 *next_fd += 1;
347 Ok(fd)
348 }
349
350 pub fn get(&self, fd: FileDescriptor) -> Option<Arc<File>> {
352 let files = self.files.read();
353 files.get(fd)?.as_ref().map(|entry| entry.file.clone())
354 }
355
356 pub fn get_entry(&self, fd: FileDescriptor) -> Option<(Arc<File>, bool)> {
358 let files = self.files.read();
359 files
360 .get(fd)?
361 .as_ref()
362 .map(|entry| (entry.file.clone(), entry.cloexec))
363 }
364
365 pub fn close(&self, fd: FileDescriptor) -> Result<(), KernelError> {
367 let mut files = self.files.write();
368
369 if fd >= files.len() {
370 return Err(KernelError::FsError(FsError::BadFileDescriptor));
371 }
372
373 if let Some(entry) = files[fd].take() {
374 if entry.file.dec_ref() == 0 {
376 }
378 Ok(())
379 } else {
380 Err(KernelError::FsError(FsError::BadFileDescriptor))
381 }
382 }
383
384 pub fn dup(&self, fd: FileDescriptor) -> Result<FileDescriptor, KernelError> {
386 let file = self
387 .get(fd)
388 .ok_or(KernelError::FsError(FsError::BadFileDescriptor))?;
389 file.inc_ref();
390 self.open(file)
392 }
393
394 pub fn dup_cloexec(&self, fd: FileDescriptor) -> Result<FileDescriptor, KernelError> {
396 let file = self
397 .get(fd)
398 .ok_or(KernelError::FsError(FsError::BadFileDescriptor))?;
399 file.inc_ref();
400 self.open_with_flags(file, true)
401 }
402
403 pub fn dup_at_least(
405 &self,
406 fd: FileDescriptor,
407 min_fd: FileDescriptor,
408 cloexec: bool,
409 ) -> Result<FileDescriptor, KernelError> {
410 let file = self
411 .get(fd)
412 .ok_or(KernelError::FsError(FsError::BadFileDescriptor))?;
413 file.inc_ref();
414
415 let mut files = self.files.write();
416 let mut next_fd = self.next_fd.write();
417
418 let entry = FileEntry { file, cloexec };
419
420 while files.len() <= min_fd {
422 files.push(None);
423 }
424 if *next_fd <= min_fd {
425 *next_fd = min_fd;
426 }
427
428 for slot_fd in min_fd..files.len() {
430 if files[slot_fd].is_none() {
431 files[slot_fd] = Some(entry);
432 return Ok(slot_fd);
433 }
434 }
435
436 let new_fd = *next_fd;
438 if new_fd >= 1024 {
439 return Err(KernelError::FsError(FsError::TooManyOpenFiles));
440 }
441
442 while files.len() <= new_fd {
444 files.push(None);
445 }
446 files[new_fd] = Some(entry);
447 *next_fd = new_fd + 1;
448 Ok(new_fd)
449 }
450
451 pub fn dup2(&self, old_fd: FileDescriptor, new_fd: FileDescriptor) -> Result<(), KernelError> {
453 if old_fd == new_fd {
455 if self.get(old_fd).is_none() {
457 return Err(KernelError::FsError(FsError::BadFileDescriptor));
458 }
459 return Ok(());
460 }
461
462 let file = self
463 .get(old_fd)
464 .ok_or(KernelError::FsError(FsError::BadFileDescriptor))?;
465 file.inc_ref();
466
467 let mut files = self.files.write();
468
469 while files.len() <= new_fd {
471 files.push(None);
472 }
473
474 if let Some(existing) = files[new_fd].take() {
476 existing.file.dec_ref();
477 }
478
479 files[new_fd] = Some(FileEntry {
481 file,
482 cloexec: false,
483 });
484 Ok(())
485 }
486
487 pub fn dup3(
489 &self,
490 old_fd: FileDescriptor,
491 new_fd: FileDescriptor,
492 cloexec: bool,
493 ) -> Result<(), KernelError> {
494 if old_fd == new_fd {
496 return Err(KernelError::InvalidArgument {
497 name: "new_fd",
498 value: "cannot be same as old_fd in dup3",
499 });
500 }
501
502 let file = self
503 .get(old_fd)
504 .ok_or(KernelError::FsError(FsError::BadFileDescriptor))?;
505 file.inc_ref();
506
507 let mut files = self.files.write();
508
509 while files.len() <= new_fd {
511 files.push(None);
512 }
513
514 if let Some(existing) = files[new_fd].take() {
516 existing.file.dec_ref();
517 }
518
519 files[new_fd] = Some(FileEntry { file, cloexec });
521 Ok(())
522 }
523
524 pub fn set_cloexec(&self, fd: FileDescriptor, cloexec: bool) -> Result<(), KernelError> {
526 let mut files = self.files.write();
527
528 if fd >= files.len() {
529 return Err(KernelError::FsError(FsError::BadFileDescriptor));
530 }
531
532 if let Some(entry) = files[fd].as_mut() {
533 entry.cloexec = cloexec;
534 Ok(())
535 } else {
536 Err(KernelError::FsError(FsError::BadFileDescriptor))
537 }
538 }
539
540 pub fn get_cloexec(&self, fd: FileDescriptor) -> Result<bool, KernelError> {
542 let files = self.files.read();
543
544 if fd >= files.len() {
545 return Err(KernelError::FsError(FsError::BadFileDescriptor));
546 }
547
548 if let Some(entry) = files[fd].as_ref() {
549 Ok(entry.cloexec)
550 } else {
551 Err(KernelError::FsError(FsError::BadFileDescriptor))
552 }
553 }
554
555 pub fn close_on_exec(&self) {
558 let mut files = self.files.write();
559
560 for slot in files.iter_mut() {
561 if let Some(entry) = slot.as_ref() {
562 if entry.cloexec {
563 if let Some(entry) = slot.take() {
565 entry.file.dec_ref();
566 }
567 }
568 }
569 }
570 }
571
572 pub fn count_open(&self) -> usize {
574 let files = self.files.read();
575 files.iter().filter(|slot| slot.is_some()).count()
576 }
577
578 pub fn clone_for_fork(&self) -> Self {
581 let files = self.files.read();
582 let next_fd = *self.next_fd.read();
583
584 let mut new_files = Vec::with_capacity(files.len());
585 for slot in files.iter() {
586 if let Some(entry) = slot {
587 entry.file.inc_ref();
588 new_files.push(Some(FileEntry {
589 file: entry.file.clone(),
590 cloexec: entry.cloexec,
591 }));
592 } else {
593 new_files.push(None);
594 }
595 }
596
597 Self {
598 files: RwLock::new(new_files),
599 next_fd: RwLock::new(next_fd),
600 }
601 }
602
603 pub fn close_all(&self) {
605 let mut files = self.files.write();
606
607 for slot in files.iter_mut() {
608 if let Some(entry) = slot.take() {
609 entry.file.dec_ref();
610 }
611 }
612 }
613}