⚠️ VeridianOS Kernel Documentation — low-level `no_std` kernel code. Treat all functions as unsafe unless explicitly marked otherwise.

veridian_kernel/drivers/virtio/
blk.rs

1//! Virtio-blk device driver
2//!
3//! Implements a block device driver for virtio-blk PCI devices as described
4//! in the virtio specification, section 5.2. Supports read and write operations
5//! using the legacy (transitional) PCI transport.
6//!
7//! # Virtio-blk request format
8//!
9//! Each request is a three-descriptor chain:
10//!
11//! 1. **Header** (device-readable): `VirtioBlkReqHeader` with request type +
12//!    sector
13//! 2. **Data** (device-readable for write, device-writable for read): sector
14//!    data
15//! 3. **Status** (device-writable): single byte result (0 = OK, 1 = IOERR, 2 =
16//!    UNSUPP)
17//!
18//! # QEMU usage
19//!
20//! ```text
21//! -drive file=disk.img,if=none,id=vd0,format=raw -device virtio-blk-pci,drive=vd0
22//! ```
23
24// Virtio-blk driver -- exercised when block device is attached
25
26use core::sync::atomic::{self, Ordering};
27
28use spin::Mutex;
29
30use super::{
31    queue::{VirtQueue, VIRTQ_DESC_F_NEXT, VIRTQ_DESC_F_WRITE},
32    VirtioPciTransport, VirtioTransport,
33};
34use crate::{
35    error::KernelError,
36    mm::{FRAME_ALLOCATOR, FRAME_SIZE},
37    sync::once_lock::OnceLock,
38};
39
/// Block size in bytes (standard 512-byte sector; virtio-blk always
/// addresses the disk in these units regardless of the physical block size).
pub const BLOCK_SIZE: usize = 512;

/// Maximum number of sectors per single request.
///
/// Currently unused: `do_request` always transfers exactly one sector.
#[allow(dead_code)] // Virtio-blk request size limit per spec
const MAX_SECTORS_PER_REQ: usize = 256;
46
/// Virtio-blk feature bits (virtio spec 5.2.3).
///
/// Bit positions match the device feature register; only a subset of these
/// is accepted during negotiation in `VirtioBlkDevice::new`.
pub mod features {
    /// Bit 1: maximum size of any single segment is in `size_max`.
    pub const VIRTIO_BLK_F_SIZE_MAX: u32 = 1 << 1;
    /// Bit 2: maximum number of segments in a request is in `seg_max`.
    pub const VIRTIO_BLK_F_SEG_MAX: u32 = 1 << 2;
    /// Bit 4: disk-style geometry specified in `geometry`.
    pub const VIRTIO_BLK_F_GEOMETRY: u32 = 1 << 4;
    /// Bit 5: device is read-only.
    pub const VIRTIO_BLK_F_RO: u32 = 1 << 5;
    /// Bit 6: block size of disk is in `blk_size`.
    pub const VIRTIO_BLK_F_BLK_SIZE: u32 = 1 << 6;
    /// Bit 9: cache flush command support.
    pub const VIRTIO_BLK_F_FLUSH: u32 = 1 << 9;
}
62
/// Virtio-blk request types (virtio spec 5.2.6).
///
/// Written into the `type_` field of `VirtioBlkReqHeader`.
mod req_type {
    /// Read sectors from the device (data descriptor is device-writable).
    pub const VIRTIO_BLK_T_IN: u32 = 0;
    /// Write sectors to the device (data descriptor is device-readable).
    pub const VIRTIO_BLK_T_OUT: u32 = 1;
    /// Flush volatile write cache.
    #[allow(dead_code)] // Virtio-blk command type per spec
    pub const VIRTIO_BLK_T_FLUSH: u32 = 4;
}
73
/// Virtio-blk status values (written by the device into the final
/// status-byte descriptor of each request chain).
mod blk_status {
    /// Request completed successfully.
    pub const VIRTIO_BLK_S_OK: u8 = 0;
    /// I/O error.
    pub const VIRTIO_BLK_S_IOERR: u8 = 1;
    /// Unsupported request type.
    pub const VIRTIO_BLK_S_UNSUPP: u8 = 2;
}
83
/// Virtio-blk request header, sent as the first descriptor in each request
/// chain.
///
/// `#[repr(C)]` with fields u32 + u32 + u64 = 16 bytes, matching the layout
/// the device expects for the header descriptor.
#[repr(C)]
#[derive(Debug, Clone, Copy)]
struct VirtioBlkReqHeader {
    /// Request type: VIRTIO_BLK_T_IN (read) or VIRTIO_BLK_T_OUT (write)
    type_: u32,
    /// Reserved field (must be zero)
    reserved: u32,
    /// Starting sector (512-byte units)
    sector: u64,
}
96
/// A DMA buffer for a single virtio-blk request.
///
/// Holds the physical memory for the header, data, and status byte so they
/// remain valid while the device processes the request. All three regions
/// live inside the single `frame`, which is returned to the frame allocator
/// when this struct is dropped.
struct RequestBuffer {
    /// Physical address of the header (given to the device in descriptor 0)
    header_phys: u64,
    /// Virtual address of the header (for writing from CPU)
    header_virt: usize,
    /// Physical address of the data region (descriptor 1)
    data_phys: u64,
    /// Virtual address of the data region
    data_virt: usize,
    /// Physical address of the status byte (descriptor 2)
    status_phys: u64,
    /// Virtual address of the status byte
    status_virt: usize,
    /// Frame allocated for the request buffer; freed on Drop
    frame: crate::mm::FrameNumber,
}
117
118impl RequestBuffer {
119    /// Allocate a request buffer from the frame allocator.
120    ///
121    /// Layout within a single 4KB frame:
122    /// - [0..16): VirtioBlkReqHeader (16 bytes)
123    /// - [16..16+data_len): Data buffer
124    /// - [16+data_len]: Status byte (1 byte)
125    fn new(data_len: usize) -> Result<Self, KernelError> {
126        let total = core::mem::size_of::<VirtioBlkReqHeader>() + data_len + 1;
127        if total > FRAME_SIZE {
128            return Err(KernelError::InvalidArgument {
129                name: "data_len",
130                value: "request buffer exceeds single frame",
131            });
132        }
133
134        let frame = FRAME_ALLOCATOR
135            .lock()
136            .allocate_frames(1, None)
137            .map_err(|_| KernelError::OutOfMemory {
138                requested: FRAME_SIZE,
139                available: 0,
140            })?;
141
142        let phys_base = frame.as_u64() * FRAME_SIZE as u64;
143        let virt_base = phys_to_kernel_virt(phys_base);
144
145        // Zero the frame
146        // SAFETY: virt_base points to a freshly allocated, kernel-accessible
147        // frame. No other references exist.
148        unsafe {
149            core::ptr::write_bytes(virt_base as *mut u8, 0, FRAME_SIZE);
150        }
151
152        let header_offset = 0;
153        let data_offset = core::mem::size_of::<VirtioBlkReqHeader>();
154        let status_offset = data_offset + data_len;
155
156        Ok(Self {
157            header_phys: phys_base + header_offset as u64,
158            header_virt: virt_base + header_offset,
159            data_phys: phys_base + data_offset as u64,
160            data_virt: virt_base + data_offset,
161            status_phys: phys_base + status_offset as u64,
162            status_virt: virt_base + status_offset,
163            frame,
164        })
165    }
166
167    /// Write the request header.
168    fn write_header(&self, type_: u32, sector: u64) {
169        let header = VirtioBlkReqHeader {
170            type_,
171            reserved: 0,
172            sector,
173        };
174        // SAFETY: header_virt points to valid memory within our allocated frame.
175        // No other references to this memory exist.
176        unsafe {
177            core::ptr::write_volatile(self.header_virt as *mut VirtioBlkReqHeader, header);
178        }
179    }
180
181    /// Write data into the data region (for write requests).
182    fn write_data(&self, data: &[u8]) {
183        // SAFETY: data_virt points to valid memory within our allocated frame,
184        // with at least `data.len()` bytes available (checked at construction).
185        unsafe {
186            core::ptr::copy_nonoverlapping(data.as_ptr(), self.data_virt as *mut u8, data.len());
187        }
188    }
189
190    /// Read data from the data region (for read requests).
191    fn read_data(&self, buf: &mut [u8]) {
192        // SAFETY: data_virt points to valid memory written by the device.
193        // buf.len() does not exceed the allocated data region.
194        unsafe {
195            core::ptr::copy_nonoverlapping(
196                self.data_virt as *const u8,
197                buf.as_mut_ptr(),
198                buf.len(),
199            );
200        }
201    }
202
203    /// Read the status byte.
204    fn read_status(&self) -> u8 {
205        // SAFETY: status_virt points to a valid byte written by the device.
206        unsafe { core::ptr::read_volatile(self.status_virt as *const u8) }
207    }
208}
209
210impl Drop for RequestBuffer {
211    fn drop(&mut self) {
212        let _ = FRAME_ALLOCATOR.lock().free_frames(self.frame, 1);
213    }
214}
215
/// Virtio block device.
///
/// Manages a single virtio-blk PCI device with one request virtqueue
/// (queue 0). Requests are synchronous: submitted and polled to completion
/// under `&mut self`.
pub struct VirtioBlkDevice {
    /// Transport handle (PCI or MMIO)
    transport: VirtioTransport,
    /// Request virtqueue (queue index 0)
    queue: VirtQueue,
    /// Device capacity in 512-byte sectors (read from device config)
    capacity_sectors: u64,
    /// Whether the device is read-only (VIRTIO_BLK_F_RO negotiated)
    read_only: bool,
    /// Negotiated feature bits (subset accepted in `new`)
    #[allow(dead_code)] // Negotiated feature bits for device capabilities
    features: u32,
}
232
233impl VirtioBlkDevice {
234    /// Probe and initialize a virtio-blk device at the given PCI BAR0 I/O base.
235    ///
236    /// Performs the full legacy virtio initialization sequence:
237    /// 1. Reset + ACKNOWLEDGE + DRIVER
238    /// 2. Read and negotiate features
239    /// 3. Set up virtqueue 0 (request queue)
240    /// 4. Set FEATURES_OK + DRIVER_OK
241    /// 5. Read device configuration (capacity)
242    pub fn new(io_base: u16) -> Result<Self, KernelError> {
243        let transport = VirtioTransport::Pci(VirtioPciTransport::new(io_base));
244
245        // Step 1-2: Begin initialization (reset + ACKNOWLEDGE + DRIVER)
246        transport.begin_init();
247
248        // Step 3: Read and negotiate features
249        let device_features = transport.read_device_features();
250        let accepted = device_features
251            & (features::VIRTIO_BLK_F_SIZE_MAX
252                | features::VIRTIO_BLK_F_SEG_MAX
253                | features::VIRTIO_BLK_F_RO
254                | features::VIRTIO_BLK_F_BLK_SIZE
255                | features::VIRTIO_BLK_F_FLUSH);
256        transport.write_guest_features(accepted);
257
258        let read_only = (accepted & features::VIRTIO_BLK_F_RO) != 0;
259
260        // Step 4: Set FEATURES_OK (legacy devices may not support this; proceed anyway)
261        let _features_ok = transport.set_features_ok();
262
263        // Step 5: Set up virtqueue 0
264        transport.select_queue(0);
265        let queue_size = transport.read_queue_size();
266        if queue_size == 0 {
267            return Err(KernelError::HardwareError {
268                device: "virtio-blk",
269                code: 0x01, // Queue size is zero -- no queue available
270            });
271        }
272
273        let queue = VirtQueue::new(queue_size)?;
274        transport.write_queue_address(queue.pfn());
275        transport.write_queue_phys(queue.phys_desc(), queue.phys_avail(), queue.phys_used());
276        transport.set_queue_ready();
277
278        // Step 6: Set DRIVER_OK -- device is live
279        transport.set_driver_ok();
280
281        // Step 7: Read device configuration -- capacity in sectors
282        // Legacy virtio-blk config starts at offset 0x14 (after common registers):
283        //   offset 0x00 (relative to config base): capacity (u64, in 512-byte sectors)
284        let capacity_sectors = transport.read_device_config_u64(0);
285
286        crate::println!(
287            "[VIRTIO-BLK] Initialized: {} sectors ({} KB), {}",
288            capacity_sectors,
289            capacity_sectors * BLOCK_SIZE as u64 / 1024,
290            if read_only { "read-only" } else { "read-write" }
291        );
292
293        Ok(Self {
294            transport,
295            queue,
296            capacity_sectors,
297            read_only,
298            features: accepted,
299        })
300    }
301
302    /// Construct from an MMIO transport + queue (used on AArch64/RISC-V).
303    pub fn from_mmio(
304        transport: crate::drivers::virtio::mmio::VirtioMmioTransport,
305        queue: VirtQueue,
306        capacity_sectors: u64,
307        read_only: bool,
308        features: u32,
309    ) -> Self {
310        Self {
311            transport: VirtioTransport::Mmio(transport),
312            queue,
313            capacity_sectors,
314            read_only,
315            features,
316        }
317    }
318
319    /// Get device capacity in 512-byte sectors.
320    pub fn capacity_sectors(&self) -> u64 {
321        self.capacity_sectors
322    }
323
324    /// Get device capacity in bytes.
325    pub fn capacity_bytes(&self) -> u64 {
326        self.capacity_sectors * BLOCK_SIZE as u64
327    }
328
329    /// Check if the device is read-only.
330    pub fn is_read_only(&self) -> bool {
331        self.read_only
332    }
333
334    /// Read a single block (512 bytes) from the device.
335    ///
336    /// `block_num` is the 0-based sector number. `buf` must be at least 512
337    /// bytes.
338    pub fn read_block(&mut self, block_num: u64, buf: &mut [u8]) -> Result<(), KernelError> {
339        if buf.len() < BLOCK_SIZE {
340            return Err(KernelError::InvalidArgument {
341                name: "buf",
342                value: "buffer must be at least 512 bytes",
343            });
344        }
345        if block_num >= self.capacity_sectors {
346            return Err(KernelError::InvalidArgument {
347                name: "block_num",
348                value: "block number exceeds device capacity",
349            });
350        }
351
352        self.do_request(req_type::VIRTIO_BLK_T_IN, block_num, Some(buf), None)
353    }
354
355    /// Write a single block (512 bytes) to the device.
356    ///
357    /// `block_num` is the 0-based sector number. `data` must be at least 512
358    /// bytes.
359    pub fn write_block(&mut self, block_num: u64, data: &[u8]) -> Result<(), KernelError> {
360        if self.read_only {
361            return Err(KernelError::PermissionDenied {
362                operation: "write to read-only virtio-blk device",
363            });
364        }
365        if data.len() < BLOCK_SIZE {
366            return Err(KernelError::InvalidArgument {
367                name: "data",
368                value: "data must be at least 512 bytes",
369            });
370        }
371        if block_num >= self.capacity_sectors {
372            return Err(KernelError::InvalidArgument {
373                name: "block_num",
374                value: "block number exceeds device capacity",
375            });
376        }
377
378        self.do_request(req_type::VIRTIO_BLK_T_OUT, block_num, None, Some(data))
379    }
380
381    /// Submit a block request and poll for completion.
382    ///
383    /// For IN (read): `read_buf` receives the data after completion.
384    /// For OUT (write): `write_data` provides the data to write.
385    fn do_request(
386        &mut self,
387        type_: u32,
388        sector: u64,
389        read_buf: Option<&mut [u8]>,
390        write_data: Option<&[u8]>,
391    ) -> Result<(), KernelError> {
392        let data_len = BLOCK_SIZE;
393
394        // Allocate DMA buffer for the request
395        let req_buf = RequestBuffer::new(data_len)?;
396
397        // Fill in the request header
398        req_buf.write_header(type_, sector);
399
400        // For write requests, copy data into the DMA buffer
401        if let Some(data) = write_data {
402            req_buf.write_data(&data[..data_len]);
403        }
404
405        // Build a 3-descriptor chain:
406        //   [0] header (device-readable)
407        //   [1] data   (device-writable for read, device-readable for write)
408        //   [2] status (device-writable)
409
410        let desc_header = self
411            .queue
412            .alloc_desc()
413            .ok_or(KernelError::ResourceExhausted {
414                resource: "virtio-blk descriptors",
415            })?;
416        let desc_data = match self.queue.alloc_desc() {
417            Some(d) => d,
418            None => {
419                self.queue.free_desc(desc_header);
420                return Err(KernelError::ResourceExhausted {
421                    resource: "virtio-blk descriptors",
422                });
423            }
424        };
425        let desc_status = match self.queue.alloc_desc() {
426            Some(d) => d,
427            None => {
428                self.queue.free_desc(desc_header);
429                self.queue.free_desc(desc_data);
430                return Err(KernelError::ResourceExhausted {
431                    resource: "virtio-blk descriptors",
432                });
433            }
434        };
435
436        // Descriptor 0: Header (device-readable, chained to data)
437        // SAFETY: desc_header is a valid allocated descriptor index. header_phys
438        // points to a valid VirtioBlkReqHeader in DMA-accessible memory.
439        unsafe {
440            self.queue.write_desc(
441                desc_header,
442                req_buf.header_phys,
443                core::mem::size_of::<VirtioBlkReqHeader>() as u32,
444                VIRTQ_DESC_F_NEXT,
445                desc_data,
446            );
447        }
448
449        // Descriptor 1: Data (direction depends on request type)
450        let data_flags = if type_ == req_type::VIRTIO_BLK_T_IN {
451            VIRTQ_DESC_F_WRITE | VIRTQ_DESC_F_NEXT // Device writes data
452        } else {
453            VIRTQ_DESC_F_NEXT // Device reads data (driver-written)
454        };
455        // SAFETY: desc_data is a valid allocated descriptor. data_phys points
456        // to valid DMA memory of at least data_len bytes.
457        unsafe {
458            self.queue.write_desc(
459                desc_data,
460                req_buf.data_phys,
461                data_len as u32,
462                data_flags,
463                desc_status,
464            );
465        }
466
467        // Descriptor 2: Status (device-writable, end of chain)
468        // SAFETY: desc_status is valid. status_phys points to 1 byte of DMA memory.
469        unsafe {
470            self.queue
471                .write_desc(desc_status, req_buf.status_phys, 1, VIRTQ_DESC_F_WRITE, 0);
472        }
473
474        // Ensure all descriptor writes are visible before notifying
475        atomic::fence(Ordering::Release);
476
477        // Push the chain head onto the available ring
478        self.queue.push_avail(desc_header);
479
480        // Notify the device
481        self.transport.notify_queue(0);
482
483        // Poll for completion
484        let mut spins: u32 = 0;
485        const MAX_SPINS: u32 = 10_000_000;
486        while !self.queue.has_used() {
487            core::hint::spin_loop();
488            spins += 1;
489            if spins >= MAX_SPINS {
490                // Free descriptors before returning error
491                self.queue.free_chain(desc_header);
492                return Err(KernelError::Timeout {
493                    operation: "virtio-blk request",
494                    duration_ms: 0,
495                });
496            }
497        }
498
499        // Consume the used entry
500        let (_used_id, _used_len) = self.queue.poll_used().ok_or(KernelError::HardwareError {
501            device: "virtio-blk",
502            code: 0x02, // Used ring empty after has_used() returned true
503        })?;
504
505        // Check status byte
506        let status = req_buf.read_status();
507        match status {
508            blk_status::VIRTIO_BLK_S_OK => {}
509            blk_status::VIRTIO_BLK_S_IOERR => {
510                self.queue.free_chain(desc_header);
511                return Err(KernelError::HardwareError {
512                    device: "virtio-blk",
513                    code: 0x10, // I/O error
514                });
515            }
516            blk_status::VIRTIO_BLK_S_UNSUPP => {
517                self.queue.free_chain(desc_header);
518                return Err(KernelError::OperationNotSupported {
519                    operation: "virtio-blk unsupported request type",
520                });
521            }
522            _ => {
523                self.queue.free_chain(desc_header);
524                return Err(KernelError::HardwareError {
525                    device: "virtio-blk",
526                    code: status as u32,
527                });
528            }
529        }
530
531        // For read requests, copy data back to the caller's buffer
532        if let Some(buf) = read_buf {
533            req_buf.read_data(&mut buf[..data_len]);
534        }
535
536        // Free the descriptor chain
537        self.queue.free_chain(desc_header);
538
539        // req_buf is dropped here, freeing the DMA frame
540        Ok(())
541    }
542}
543
/// Block device trait for generic block I/O operations.
///
/// Blocks are `block_size()` bytes (512 by default, via `BLOCK_SIZE`).
/// `Send + Sync` so implementors can be shared behind a lock.
pub trait BlockDevice: Send + Sync {
    /// Read a block (512 bytes) at the given sector number into `buf`.
    fn read_block(&mut self, block_num: u64, buf: &mut [u8]) -> Result<(), KernelError>;

    /// Write a block (512 bytes) from `data` at the given sector number.
    fn write_block(&mut self, block_num: u64, data: &[u8]) -> Result<(), KernelError>;

    /// Get the device capacity in sectors.
    fn capacity_sectors(&self) -> u64;

    /// Get the block size in bytes (defaults to the standard 512-byte sector).
    fn block_size(&self) -> usize {
        BLOCK_SIZE
    }

    /// Check if the device is read-only.
    fn is_read_only(&self) -> bool;
}
563
564impl BlockDevice for VirtioBlkDevice {
565    fn read_block(&mut self, block_num: u64, buf: &mut [u8]) -> Result<(), KernelError> {
566        VirtioBlkDevice::read_block(self, block_num, buf)
567    }
568
569    fn write_block(&mut self, block_num: u64, data: &[u8]) -> Result<(), KernelError> {
570        VirtioBlkDevice::write_block(self, block_num, data)
571    }
572
573    fn capacity_sectors(&self) -> u64 {
574        self.capacity_sectors
575    }
576
577    fn is_read_only(&self) -> bool {
578        self.read_only
579    }
580}
581
582// ---------------------------------------------------------------------------
583// Global driver instance and initialization
584// ---------------------------------------------------------------------------
585
/// Global virtio-blk device instance (if a device was found and initialized).
///
/// Set at most once by `init()`; wrapped in a spin `Mutex` because requests
/// require `&mut VirtioBlkDevice`.
static VIRTIO_BLK: OnceLock<Mutex<VirtioBlkDevice>> = OnceLock::new();
588
589/// Probe PCI bus for virtio-blk devices and initialize the first one found.
590///
591/// This is only meaningful on x86_64 where PCI I/O port access works. On
592/// AArch64 and RISC-V, this function is a no-op stub (virtio-mmio transport
593/// would be needed instead).
594pub fn init() {
595    #[cfg(target_arch = "x86_64")]
596    init_x86_64();
597
598    #[cfg(any(target_arch = "aarch64", target_arch = "riscv64"))]
599    init_mmio();
600}
601
602/// x86_64 PCI-based virtio-blk initialization.
603#[cfg(target_arch = "x86_64")]
604fn init_x86_64() {
605    use crate::drivers::pci;
606
607    if !pci::is_pci_initialized() {
608        crate::println!("[VIRTIO-BLK] PCI bus not initialized, skipping");
609        return;
610    }
611
612    let pci_bus = pci::get_pci_bus().lock();
613
614    // Search for virtio-blk devices (vendor 0x1AF4, device 0x1001 or 0x1042)
615    // We only support one virtio-blk device; stop after the first successful init.
616    let all_devices = pci_bus.get_all_devices();
617    drop(pci_bus); // Release PCI lock before performing device init
618
619    for device in &all_devices {
620        if device.vendor_id != super::VIRTIO_VENDOR_ID {
621            continue;
622        }
623        if device.device_id != super::VIRTIO_BLK_DEVICE_ID_LEGACY
624            && device.device_id != super::VIRTIO_BLK_DEVICE_ID_MODERN
625        {
626            continue;
627        }
628
629        crate::println!(
630            "[VIRTIO-BLK] Found device at {}:{}:{} (ID {:04x}:{:04x})",
631            device.location.bus,
632            device.location.device,
633            device.location.function,
634            device.vendor_id,
635            device.device_id,
636        );
637
638        // Get BAR0 I/O port address
639        let io_base = match device.bars.first() {
640            Some(bar) => match bar.get_io_address() {
641                Some(addr) => addr as u16,
642                None => {
643                    // Legacy devices sometimes use I/O BARs,
644                    // but QEMU may present an I/O BAR at BAR0.
645                    crate::println!("[VIRTIO-BLK] BAR0 is not an I/O BAR, skipping device");
646                    continue;
647                }
648            },
649            None => {
650                crate::println!("[VIRTIO-BLK] No BAR0 found, skipping device");
651                continue;
652            }
653        };
654
655        // Enable I/O space, memory space, and bus mastering
656        enable_bus_master(device);
657
658        match VirtioBlkDevice::new(io_base) {
659            Ok(dev) => {
660                let _ = VIRTIO_BLK.set(Mutex::new(dev));
661                crate::println!("[VIRTIO-BLK] Device initialized and registered");
662            }
663            Err(e) => {
664                crate::println!("[VIRTIO-BLK] Failed to initialize device: {:?}", e);
665            }
666        }
667
668        // We only support one virtio-blk device for now
669        return;
670    }
671
672    crate::println!("[VIRTIO-BLK] No virtio-blk devices found on PCI bus");
673}
674
/// AArch64 / RISC-V virtio-mmio initialization.
///
/// Probes the architecture-specific MMIO base addresses for a virtio-blk
/// device. On AArch64, these are at 0x0A00_0000 with 0x200 stride; on
/// RISC-V, at 0x1000_1000 with 0x1000 stride. See
/// [`super::mmio::DEFAULT_BASES`].
#[cfg(any(target_arch = "aarch64", target_arch = "riscv64"))]
fn init_mmio() {
    use crate::drivers::virtio::mmio::{try_init_mmio_blk, DEFAULT_BASES};

    // Walk the standard virtio-mmio slots exposed by QEMU virt; register
    // the first device that probes successfully and stop.
    for base in DEFAULT_BASES {
        if let Ok(dev) = try_init_mmio_blk(base) {
            if VIRTIO_BLK.set(Mutex::new(dev)).is_ok() {
                crate::println!("[VIRTIO-BLK/MMIO] Device initialized at base {:#x}", base);
                return;
            }
        }
    }

    crate::println!("[VIRTIO-BLK/MMIO] No virtio-blk mmio device detected");
}
700
701/// Enable PCI I/O space, memory space, and bus mastering for a device.
702#[cfg(target_arch = "x86_64")]
703fn enable_bus_master(device: &crate::drivers::pci::PciDevice) {
704    let loc = device.location;
705    let config_addr = loc.to_config_address() | (0x04 & 0xFC); // Command register at offset 0x04
706
707    // SAFETY: Reading and writing PCI configuration space via mechanism #1
708    // (ports 0xCF8/0xCFC). We are in kernel mode with full I/O privilege.
709    unsafe {
710        crate::arch::outl(0xCF8, config_addr);
711        let cmd = crate::arch::inl(0xCFC);
712        // Set bit 0 (I/O Space), bit 1 (Memory Space), bit 2 (Bus Master)
713        // Only modify the lower 16 bits (Command register); preserve upper
714        // 16 bits (Status register) as zeros to avoid W1C side-effects.
715        let new_cmd = (cmd & 0xFFFF) | 0x07;
716        crate::arch::outl(0xCF8, config_addr);
717        crate::arch::outl(0xCFC, new_cmd);
718    }
719}
720
/// Get a reference to the global virtio-blk device, if initialized.
///
/// Returns `None` until `init()` has successfully registered a device.
pub fn get_device() -> Option<&'static Mutex<VirtioBlkDevice>> {
    VIRTIO_BLK.get()
}
725
/// Check if a virtio-blk device has been initialized (i.e. `init()` found
/// and registered a device).
pub fn is_initialized() -> bool {
    VIRTIO_BLK.get().is_some()
}
730
731/// Convert a physical address to a kernel-accessible virtual address.
732fn phys_to_kernel_virt(phys: u64) -> usize {
733    #[cfg(target_arch = "x86_64")]
734    {
735        if let Some(virt) = crate::arch::x86_64::msr::phys_to_virt(phys as usize) {
736            return virt;
737        }
738        (phys + 0xFFFF_8000_0000_0000) as usize
739    }
740
741    #[cfg(not(target_arch = "x86_64"))]
742    {
743        phys as usize
744    }
745}