1use core::sync::atomic::{self, Ordering};
27
28use spin::Mutex;
29
30use super::{
31 queue::{VirtQueue, VIRTQ_DESC_F_NEXT, VIRTQ_DESC_F_WRITE},
32 VirtioPciTransport, VirtioTransport,
33};
34use crate::{
35 error::KernelError,
36 mm::{FRAME_ALLOCATOR, FRAME_SIZE},
37 sync::once_lock::OnceLock,
38};
39
/// Sector size in bytes; all virtio-blk requests operate on 512-byte units.
pub const BLOCK_SIZE: usize = 512;

// NOTE(review): currently unused; presumably a cap for future
// multi-sector requests — confirm before relying on it.
#[allow(dead_code)] const MAX_SECTORS_PER_REQ: usize = 256;
46
/// Virtio-blk feature bits offered in the device feature word during
/// negotiation (virtio spec §5.2.3).
pub mod features {
    /// Maximum size of any single segment is in config space.
    pub const VIRTIO_BLK_F_SIZE_MAX: u32 = 1 << 1;
    /// Maximum number of segments per request is in config space.
    pub const VIRTIO_BLK_F_SEG_MAX: u32 = 1 << 2;
    /// Legacy disk geometry (C/H/S) fields are valid.
    pub const VIRTIO_BLK_F_GEOMETRY: u32 = 1 << 4;
    /// Device is read-only.
    pub const VIRTIO_BLK_F_RO: u32 = 1 << 5;
    /// Device block size is in config space.
    pub const VIRTIO_BLK_F_BLK_SIZE: u32 = 1 << 6;
    /// Device supports the cache-flush command.
    pub const VIRTIO_BLK_F_FLUSH: u32 = 1 << 9;
}
62
/// Values for the `type_` field of the virtio-blk request header.
mod req_type {
    /// Read sectors from the device into the data buffer.
    pub const VIRTIO_BLK_T_IN: u32 = 0;
    /// Write the data buffer out to the device.
    pub const VIRTIO_BLK_T_OUT: u32 = 1;
    /// Flush the device write cache (not issued by this driver yet).
    #[allow(dead_code)] pub const VIRTIO_BLK_T_FLUSH: u32 = 4;
}
73
/// Completion codes the device writes into the request's status byte.
mod blk_status {
    /// Request completed successfully.
    pub const VIRTIO_BLK_S_OK: u8 = 0;
    /// Device-side I/O error.
    pub const VIRTIO_BLK_S_IOERR: u8 = 1;
    /// Request type not supported by the device.
    pub const VIRTIO_BLK_S_UNSUPP: u8 = 2;
}
83
/// Fixed 16-byte header that starts every virtio-blk request
/// (virtio spec §5.2.6). `#[repr(C)]` pins the exact field layout the
/// device expects to read via DMA.
#[repr(C)]
#[derive(Debug, Clone, Copy)]
struct VirtioBlkReqHeader {
    /// One of the `req_type` constants (IN / OUT / FLUSH).
    type_: u32,
    /// Must be zero.
    reserved: u32,
    /// Starting sector of the transfer, in 512-byte units.
    sector: u64,
}
96
/// One DMA-able physical frame carved into `[header | data | status byte]`
/// for a single virtio-blk request. Physical addresses are placed into
/// descriptor entries for the device; virtual addresses are used by the
/// CPU side to fill/read the same memory.
struct RequestBuffer {
    /// Physical address of the request header (device-readable).
    header_phys: u64,
    /// Kernel virtual address of the request header.
    header_virt: usize,
    /// Physical address of the data region.
    data_phys: u64,
    /// Kernel virtual address of the data region.
    data_virt: usize,
    /// Physical address of the 1-byte status field (device-writable).
    status_phys: u64,
    /// Kernel virtual address of the status byte.
    status_virt: usize,
    /// Backing frame; returned to the allocator on drop.
    frame: crate::mm::FrameNumber,
}
117
impl RequestBuffer {
    /// Allocate one physical frame and lay it out as
    /// `[header | data (data_len bytes) | 1 status byte]`.
    ///
    /// # Errors
    /// * `InvalidArgument` if the layout does not fit in a single frame.
    /// * `OutOfMemory` if the frame allocator cannot supply a frame.
    fn new(data_len: usize) -> Result<Self, KernelError> {
        // Header + data + 1 status byte must fit one frame, since only a
        // single contiguous frame is allocated below.
        let total = core::mem::size_of::<VirtioBlkReqHeader>() + data_len + 1;
        if total > FRAME_SIZE {
            return Err(KernelError::InvalidArgument {
                name: "data_len",
                value: "request buffer exceeds single frame",
            });
        }

        let frame = FRAME_ALLOCATOR
            .lock()
            .allocate_frames(1, None)
            .map_err(|_| KernelError::OutOfMemory {
                requested: FRAME_SIZE,
                available: 0,
            })?;

        let phys_base = frame.as_u64() * FRAME_SIZE as u64;
        let virt_base = phys_to_kernel_virt(phys_base);

        // SAFETY: the frame was just allocated for exclusive use and
        // translated through the kernel's phys->virt mapping, so the
        // whole FRAME_SIZE span is ours to zero.
        unsafe {
            core::ptr::write_bytes(virt_base as *mut u8, 0, FRAME_SIZE);
        }

        // Offsets of the three regions within the frame.
        let header_offset = 0;
        let data_offset = core::mem::size_of::<VirtioBlkReqHeader>();
        let status_offset = data_offset + data_len;

        Ok(Self {
            header_phys: phys_base + header_offset as u64,
            header_virt: virt_base + header_offset,
            data_phys: phys_base + data_offset as u64,
            data_virt: virt_base + data_offset,
            status_phys: phys_base + status_offset as u64,
            status_virt: virt_base + status_offset,
            frame,
        })
    }

    /// Fill in the request header. Volatile write: the device reads this
    /// memory via DMA, so the store must not be elided or reordered away.
    fn write_header(&self, type_: u32, sector: u64) {
        let header = VirtioBlkReqHeader {
            type_,
            reserved: 0,
            sector,
        };
        // SAFETY: header_virt points at the start of our frame, which has
        // room for the header (checked against FRAME_SIZE in `new`).
        unsafe {
            core::ptr::write_volatile(self.header_virt as *mut VirtioBlkReqHeader, header);
        }
    }

    /// Copy `data` into the request's data region.
    ///
    /// NOTE(review): no bounds check against the `data_len` this buffer
    /// was created with — callers must pass at most `data_len` bytes.
    /// (The only caller slices to exactly BLOCK_SIZE.) Confirm before
    /// adding new callers.
    fn write_data(&self, data: &[u8]) {
        // SAFETY: caller guarantees data.len() fits the data region; the
        // frame is driver-owned so source/destination cannot overlap.
        unsafe {
            core::ptr::copy_nonoverlapping(data.as_ptr(), self.data_virt as *mut u8, data.len());
        }
    }

    /// Copy the device-written data region out into `buf`.
    ///
    /// NOTE(review): same missing bounds check as `write_data` — `buf`
    /// must be at most `data_len` bytes long.
    fn read_data(&self, buf: &mut [u8]) {
        // SAFETY: caller guarantees buf.len() fits the data region.
        unsafe {
            core::ptr::copy_nonoverlapping(
                self.data_virt as *const u8,
                buf.as_mut_ptr(),
                buf.len(),
            );
        }
    }

    /// Read the one-byte completion status. Volatile read: the device
    /// updates this byte behind the compiler's back.
    fn read_status(&self) -> u8 {
        // SAFETY: status_virt points at the in-frame status byte.
        unsafe { core::ptr::read_volatile(self.status_virt as *const u8) }
    }
}
209
impl Drop for RequestBuffer {
    /// Return the backing frame to the allocator. A failed free is
    /// deliberately ignored: there is nothing useful to do in drop.
    fn drop(&mut self) {
        let _ = FRAME_ALLOCATOR.lock().free_frames(self.frame, 1);
    }
}
215
/// Driver state for one virtio block device.
pub struct VirtioBlkDevice {
    /// Register access — PCI (x86_64) or MMIO (aarch64/riscv64).
    transport: VirtioTransport,
    /// The single request virtqueue (queue index 0).
    queue: VirtQueue,
    /// Device capacity in 512-byte sectors, read from config space.
    capacity_sectors: u64,
    /// True when VIRTIO_BLK_F_RO was negotiated; writes are rejected.
    read_only: bool,
    /// Negotiated feature bits; currently unused after init.
    #[allow(dead_code)] features: u32,
}
232
233impl VirtioBlkDevice {
234 pub fn new(io_base: u16) -> Result<Self, KernelError> {
243 let transport = VirtioTransport::Pci(VirtioPciTransport::new(io_base));
244
245 transport.begin_init();
247
248 let device_features = transport.read_device_features();
250 let accepted = device_features
251 & (features::VIRTIO_BLK_F_SIZE_MAX
252 | features::VIRTIO_BLK_F_SEG_MAX
253 | features::VIRTIO_BLK_F_RO
254 | features::VIRTIO_BLK_F_BLK_SIZE
255 | features::VIRTIO_BLK_F_FLUSH);
256 transport.write_guest_features(accepted);
257
258 let read_only = (accepted & features::VIRTIO_BLK_F_RO) != 0;
259
260 let _features_ok = transport.set_features_ok();
262
263 transport.select_queue(0);
265 let queue_size = transport.read_queue_size();
266 if queue_size == 0 {
267 return Err(KernelError::HardwareError {
268 device: "virtio-blk",
269 code: 0x01, });
271 }
272
273 let queue = VirtQueue::new(queue_size)?;
274 transport.write_queue_address(queue.pfn());
275 transport.write_queue_phys(queue.phys_desc(), queue.phys_avail(), queue.phys_used());
276 transport.set_queue_ready();
277
278 transport.set_driver_ok();
280
281 let capacity_sectors = transport.read_device_config_u64(0);
285
286 crate::println!(
287 "[VIRTIO-BLK] Initialized: {} sectors ({} KB), {}",
288 capacity_sectors,
289 capacity_sectors * BLOCK_SIZE as u64 / 1024,
290 if read_only { "read-only" } else { "read-write" }
291 );
292
293 Ok(Self {
294 transport,
295 queue,
296 capacity_sectors,
297 read_only,
298 features: accepted,
299 })
300 }
301
302 pub fn from_mmio(
304 transport: crate::drivers::virtio::mmio::VirtioMmioTransport,
305 queue: VirtQueue,
306 capacity_sectors: u64,
307 read_only: bool,
308 features: u32,
309 ) -> Self {
310 Self {
311 transport: VirtioTransport::Mmio(transport),
312 queue,
313 capacity_sectors,
314 read_only,
315 features,
316 }
317 }
318
319 pub fn capacity_sectors(&self) -> u64 {
321 self.capacity_sectors
322 }
323
324 pub fn capacity_bytes(&self) -> u64 {
326 self.capacity_sectors * BLOCK_SIZE as u64
327 }
328
329 pub fn is_read_only(&self) -> bool {
331 self.read_only
332 }
333
334 pub fn read_block(&mut self, block_num: u64, buf: &mut [u8]) -> Result<(), KernelError> {
339 if buf.len() < BLOCK_SIZE {
340 return Err(KernelError::InvalidArgument {
341 name: "buf",
342 value: "buffer must be at least 512 bytes",
343 });
344 }
345 if block_num >= self.capacity_sectors {
346 return Err(KernelError::InvalidArgument {
347 name: "block_num",
348 value: "block number exceeds device capacity",
349 });
350 }
351
352 self.do_request(req_type::VIRTIO_BLK_T_IN, block_num, Some(buf), None)
353 }
354
355 pub fn write_block(&mut self, block_num: u64, data: &[u8]) -> Result<(), KernelError> {
360 if self.read_only {
361 return Err(KernelError::PermissionDenied {
362 operation: "write to read-only virtio-blk device",
363 });
364 }
365 if data.len() < BLOCK_SIZE {
366 return Err(KernelError::InvalidArgument {
367 name: "data",
368 value: "data must be at least 512 bytes",
369 });
370 }
371 if block_num >= self.capacity_sectors {
372 return Err(KernelError::InvalidArgument {
373 name: "block_num",
374 value: "block number exceeds device capacity",
375 });
376 }
377
378 self.do_request(req_type::VIRTIO_BLK_T_OUT, block_num, None, Some(data))
379 }
380
381 fn do_request(
386 &mut self,
387 type_: u32,
388 sector: u64,
389 read_buf: Option<&mut [u8]>,
390 write_data: Option<&[u8]>,
391 ) -> Result<(), KernelError> {
392 let data_len = BLOCK_SIZE;
393
394 let req_buf = RequestBuffer::new(data_len)?;
396
397 req_buf.write_header(type_, sector);
399
400 if let Some(data) = write_data {
402 req_buf.write_data(&data[..data_len]);
403 }
404
405 let desc_header = self
411 .queue
412 .alloc_desc()
413 .ok_or(KernelError::ResourceExhausted {
414 resource: "virtio-blk descriptors",
415 })?;
416 let desc_data = match self.queue.alloc_desc() {
417 Some(d) => d,
418 None => {
419 self.queue.free_desc(desc_header);
420 return Err(KernelError::ResourceExhausted {
421 resource: "virtio-blk descriptors",
422 });
423 }
424 };
425 let desc_status = match self.queue.alloc_desc() {
426 Some(d) => d,
427 None => {
428 self.queue.free_desc(desc_header);
429 self.queue.free_desc(desc_data);
430 return Err(KernelError::ResourceExhausted {
431 resource: "virtio-blk descriptors",
432 });
433 }
434 };
435
436 unsafe {
440 self.queue.write_desc(
441 desc_header,
442 req_buf.header_phys,
443 core::mem::size_of::<VirtioBlkReqHeader>() as u32,
444 VIRTQ_DESC_F_NEXT,
445 desc_data,
446 );
447 }
448
449 let data_flags = if type_ == req_type::VIRTIO_BLK_T_IN {
451 VIRTQ_DESC_F_WRITE | VIRTQ_DESC_F_NEXT } else {
453 VIRTQ_DESC_F_NEXT };
455 unsafe {
458 self.queue.write_desc(
459 desc_data,
460 req_buf.data_phys,
461 data_len as u32,
462 data_flags,
463 desc_status,
464 );
465 }
466
467 unsafe {
470 self.queue
471 .write_desc(desc_status, req_buf.status_phys, 1, VIRTQ_DESC_F_WRITE, 0);
472 }
473
474 atomic::fence(Ordering::Release);
476
477 self.queue.push_avail(desc_header);
479
480 self.transport.notify_queue(0);
482
483 let mut spins: u32 = 0;
485 const MAX_SPINS: u32 = 10_000_000;
486 while !self.queue.has_used() {
487 core::hint::spin_loop();
488 spins += 1;
489 if spins >= MAX_SPINS {
490 self.queue.free_chain(desc_header);
492 return Err(KernelError::Timeout {
493 operation: "virtio-blk request",
494 duration_ms: 0,
495 });
496 }
497 }
498
499 let (_used_id, _used_len) = self.queue.poll_used().ok_or(KernelError::HardwareError {
501 device: "virtio-blk",
502 code: 0x02, })?;
504
505 let status = req_buf.read_status();
507 match status {
508 blk_status::VIRTIO_BLK_S_OK => {}
509 blk_status::VIRTIO_BLK_S_IOERR => {
510 self.queue.free_chain(desc_header);
511 return Err(KernelError::HardwareError {
512 device: "virtio-blk",
513 code: 0x10, });
515 }
516 blk_status::VIRTIO_BLK_S_UNSUPP => {
517 self.queue.free_chain(desc_header);
518 return Err(KernelError::OperationNotSupported {
519 operation: "virtio-blk unsupported request type",
520 });
521 }
522 _ => {
523 self.queue.free_chain(desc_header);
524 return Err(KernelError::HardwareError {
525 device: "virtio-blk",
526 code: status as u32,
527 });
528 }
529 }
530
531 if let Some(buf) = read_buf {
533 req_buf.read_data(&mut buf[..data_len]);
534 }
535
536 self.queue.free_chain(desc_header);
538
539 Ok(())
541 }
542}
543
/// Minimal block-device abstraction so higher layers can stay
/// transport-agnostic.
pub trait BlockDevice: Send + Sync {
    /// Read one block (`block_size()` bytes) at `block_num` into `buf`.
    fn read_block(&mut self, block_num: u64, buf: &mut [u8]) -> Result<(), KernelError>;

    /// Write one block from `data` to `block_num`.
    fn write_block(&mut self, block_num: u64, data: &[u8]) -> Result<(), KernelError>;

    /// Total capacity in `block_size()`-sized sectors.
    fn capacity_sectors(&self) -> u64;

    /// Block size in bytes; defaults to the 512-byte virtio-blk sector.
    fn block_size(&self) -> usize {
        BLOCK_SIZE
    }

    /// Whether writes are rejected.
    fn is_read_only(&self) -> bool;
}
563
564impl BlockDevice for VirtioBlkDevice {
565 fn read_block(&mut self, block_num: u64, buf: &mut [u8]) -> Result<(), KernelError> {
566 VirtioBlkDevice::read_block(self, block_num, buf)
567 }
568
569 fn write_block(&mut self, block_num: u64, data: &[u8]) -> Result<(), KernelError> {
570 VirtioBlkDevice::write_block(self, block_num, data)
571 }
572
573 fn capacity_sectors(&self) -> u64 {
574 self.capacity_sectors
575 }
576
577 fn is_read_only(&self) -> bool {
578 self.read_only
579 }
580}
581
/// Global handle to the first successfully initialized virtio-blk device.
static VIRTIO_BLK: OnceLock<Mutex<VirtioBlkDevice>> = OnceLock::new();
588
/// Probe for a virtio-blk device using the platform-appropriate
/// transport (PCI on x86_64, MMIO on aarch64/riscv64) and register it
/// in `VIRTIO_BLK`.
pub fn init() {
    #[cfg(target_arch = "x86_64")]
    init_x86_64();

    #[cfg(any(target_arch = "aarch64", target_arch = "riscv64"))]
    init_mmio();
}
601
602#[cfg(target_arch = "x86_64")]
604fn init_x86_64() {
605 use crate::drivers::pci;
606
607 if !pci::is_pci_initialized() {
608 crate::println!("[VIRTIO-BLK] PCI bus not initialized, skipping");
609 return;
610 }
611
612 let pci_bus = pci::get_pci_bus().lock();
613
614 let all_devices = pci_bus.get_all_devices();
617 drop(pci_bus); for device in &all_devices {
620 if device.vendor_id != super::VIRTIO_VENDOR_ID {
621 continue;
622 }
623 if device.device_id != super::VIRTIO_BLK_DEVICE_ID_LEGACY
624 && device.device_id != super::VIRTIO_BLK_DEVICE_ID_MODERN
625 {
626 continue;
627 }
628
629 crate::println!(
630 "[VIRTIO-BLK] Found device at {}:{}:{} (ID {:04x}:{:04x})",
631 device.location.bus,
632 device.location.device,
633 device.location.function,
634 device.vendor_id,
635 device.device_id,
636 );
637
638 let io_base = match device.bars.first() {
640 Some(bar) => match bar.get_io_address() {
641 Some(addr) => addr as u16,
642 None => {
643 crate::println!("[VIRTIO-BLK] BAR0 is not an I/O BAR, skipping device");
646 continue;
647 }
648 },
649 None => {
650 crate::println!("[VIRTIO-BLK] No BAR0 found, skipping device");
651 continue;
652 }
653 };
654
655 enable_bus_master(device);
657
658 match VirtioBlkDevice::new(io_base) {
659 Ok(dev) => {
660 let _ = VIRTIO_BLK.set(Mutex::new(dev));
661 crate::println!("[VIRTIO-BLK] Device initialized and registered");
662 }
663 Err(e) => {
664 crate::println!("[VIRTIO-BLK] Failed to initialize device: {:?}", e);
665 }
666 }
667
668 return;
670 }
671
672 crate::println!("[VIRTIO-BLK] No virtio-blk devices found on PCI bus");
673}
674
/// Probe the platform's default virtio-mmio windows for a block device
/// and register the first one that initializes.
#[cfg(any(target_arch = "aarch64", target_arch = "riscv64"))]
fn init_mmio() {
    use crate::drivers::virtio::mmio::{try_init_mmio_blk, DEFAULT_BASES};

    for base in DEFAULT_BASES {
        // Bases that don't host a working blk device are simply skipped.
        if let Ok(dev) = try_init_mmio_blk(base) {
            if VIRTIO_BLK.set(Mutex::new(dev)).is_ok() {
                crate::println!("[VIRTIO-BLK/MMIO] Device initialized at base {:#x}", base);
                return;
            }
        }
    }

    crate::println!("[VIRTIO-BLK/MMIO] No virtio-blk mmio device detected");
}
700
/// Enable bus mastering (plus I/O and memory space decoding) in the
/// device's PCI command register so it can DMA into guest memory.
#[cfg(target_arch = "x86_64")]
fn enable_bus_master(device: &crate::drivers::pci::PciDevice) {
    let loc = device.location;
    // Config offset 0x04 holds command (low 16 bits) and status (high 16
    // bits). The `& 0xFC` masks the offset to dword alignment as the
    // CF8/CFC mechanism requires (a no-op for 0x04, kept for clarity).
    let config_addr = loc.to_config_address() | (0x04 & 0xFC);
    unsafe {
        crate::arch::outl(0xCF8, config_addr);
        let cmd = crate::arch::inl(0xCFC);
        // Keep the command half, zero the status half (status bits are
        // write-1-to-clear, so writing zeros preserves them), then set
        // I/O space (bit 0), memory space (bit 1), bus master (bit 2).
        let new_cmd = (cmd & 0xFFFF) | 0x07;
        crate::arch::outl(0xCF8, config_addr);
        crate::arch::outl(0xCFC, new_cmd);
    }
}
720
/// Returns the registered virtio-blk device, if `init` found one.
pub fn get_device() -> Option<&'static Mutex<VirtioBlkDevice>> {
    VIRTIO_BLK.get()
}
725
726pub fn is_initialized() -> bool {
728 VIRTIO_BLK.get().is_some()
729}
730
731fn phys_to_kernel_virt(phys: u64) -> usize {
733 #[cfg(target_arch = "x86_64")]
734 {
735 if let Some(virt) = crate::arch::x86_64::msr::phys_to_virt(phys as usize) {
736 return virt;
737 }
738 (phys + 0xFFFF_8000_0000_0000) as usize
739 }
740
741 #[cfg(not(target_arch = "x86_64"))]
742 {
743 phys as usize
744 }
745}