⚠️ VeridianOS Kernel Documentation - This is low-level kernel code. All functions are unsafe unless explicitly marked otherwise. no_std

veridian_kernel/drivers/virtio/
mmio.rs

1//! Virtio MMIO transport (virtio 1.0 legacy-compatible)
2//!
3//! Implements the virtio-over-MMIO transport layer as defined in the
4//! [virtio specification, section 4.2](https://docs.oasis-open.org/virtio/virtio/v1.2/virtio-v1.2.html).
5//! This is the transport used on AArch64 and RISC-V QEMU `virt` machines,
6//! where virtio devices are memory-mapped rather than behind PCI.
7//!
8//! # Default MMIO Base Addresses
9//!
10//! The `DEFAULT_BASES` array lists the first four virtio-mmio device regions
11//! for QEMU's `virt` machine. Each region is 0x200 bytes (512 bytes) and
12//! contains the standard virtio-mmio register set at the offsets defined in
13//! the `regs` module. Valid register offsets range from 0x000 to 0x0A4.
//! Device-specific configuration space starts at offset 0x100 in both the
//! legacy and modern virtio-mmio register layouts (virtio spec 4.2.2).
16//!
17//! # Usage
18//!
19//! This is a minimal implementation sufficient for virtio-blk using split
20//! virtqueues. For x86_64, the PCI transport in `mod.rs` is used instead.
21//! See [`super::VirtioTransport`] for the unified transport enum.
22
23// Virtio MMIO transport -- AArch64/RISC-V device access
24
25use core::ptr;
26
27use crate::{
28    arch::barriers::{data_sync_barrier, instruction_sync_barrier},
29    error::KernelError,
30};
31
/// Default virtio-mmio base addresses for QEMU's `virt` machine.
///
/// The two architectures use different memory maps:
///
/// - **AArch64**: virtio-mmio devices start at 0x0A00_0000 with 0x200 stride
///   (up to 32 devices, each 512 bytes). See QEMU `hw/arm/virt.c`.
/// - **RISC-V**: virtio-mmio devices start at 0x1000_1000 with 0x1000 stride
///   (up to 8 devices, each 4 KB). See QEMU `hw/riscv/virt.c`.
///
/// We probe the first four slots; this is sufficient for virtio-blk discovery.
#[cfg(target_arch = "aarch64")]
pub const DEFAULT_BASES: [usize; 4] = [0x0a00_0000, 0x0a00_0200, 0x0a00_0400, 0x0a00_0600];

#[cfg(target_arch = "riscv64")]
pub const DEFAULT_BASES: [usize; 4] = [0x1000_1000, 0x1000_2000, 0x1000_3000, 0x1000_4000];

/// Fallback for other architectures (should not be reached; MMIO transport is
/// only used on AArch64 and RISC-V).
///
/// NOTE(review): the 0x2000 stride here matches neither memory map above —
/// harmless while this cfg arm is unreachable, but confirm before enabling
/// the MMIO transport on a new target.
#[cfg(not(any(target_arch = "aarch64", target_arch = "riscv64")))]
pub const DEFAULT_BASES: [usize; 4] = [0x0a00_0000, 0x0a00_2000, 0x0a00_4000, 0x0a00_6000];
52
/// MMIO register offsets (per virtio spec 4.2.2).
///
/// These are the *modern* (version 2) virtio-mmio offsets. The legacy
/// (version 1) layout differs for queue setup: it uses QUEUE_PFN/QUEUE_ALIGN
/// instead of QUEUE_READY and the split DESC/AVAIL/USED address registers
/// listed below.
///
/// Valid offsets range from 0x000 (MAGIC) to 0x0A4 (QUEUE_USED_HIGH).
/// Each register is 32 bits wide unless noted otherwise. The caller must
/// ensure that `base + offset` falls within the 0x200-byte MMIO region
/// mapped for the device.
mod regs {
    pub const MAGIC: usize = 0x000; // Magic value "virt" (reads 0x7472_6976)
    pub const VERSION: usize = 0x004; // 1 = legacy, 2 = modern
    pub const DEVICE_ID: usize = 0x008; // Device type (2 = block device)
    #[allow(dead_code)] // Virtio MMIO register per spec
    pub const VENDOR_ID: usize = 0x00c;
    pub const DEVICE_FEATURES: usize = 0x010; // 32-bit window of device-offered feature bits
    pub const DEVICE_FEATURES_SEL: usize = 0x014; // Selects which feature word DEVICE_FEATURES shows
    pub const DRIVER_FEATURES: usize = 0x020; // 32-bit window of driver-accepted feature bits
    pub const DRIVER_FEATURES_SEL: usize = 0x024; // Selects which feature word DRIVER_FEATURES writes
    pub const QUEUE_SEL: usize = 0x030; // Selects the queue the QUEUE_* registers address
    pub const QUEUE_NUM_MAX: usize = 0x034; // Max queue size the device supports (0 = queue absent)
    pub const QUEUE_NUM: usize = 0x038; // Queue size chosen by the driver
    pub const QUEUE_READY: usize = 0x044; // Write 1 to activate the selected queue (modern only)
    pub const QUEUE_NOTIFY: usize = 0x050; // Write a queue index here to kick the device
    pub const INTERRUPT_STATUS: usize = 0x060; // Pending interrupt cause bits
    pub const INTERRUPT_ACK: usize = 0x064; // Write cause bits back to acknowledge them
    pub const STATUS: usize = 0x070; // Device status handshake (see `status` module)
    // Physical addresses for split virtqueues (64-bit values, low/high halves)
    pub const QUEUE_DESC_LOW: usize = 0x080;
    pub const QUEUE_DESC_HIGH: usize = 0x084;
    pub const QUEUE_AVAIL_LOW: usize = 0x090;
    pub const QUEUE_AVAIL_HIGH: usize = 0x094;
    pub const QUEUE_USED_LOW: usize = 0x0a0;
    pub const QUEUE_USED_HIGH: usize = 0x0a4;
}
85
/// Virtio-mmio device status flags (same values as the PCI transport).
///
/// Written to / read back from the STATUS register during the
/// initialization handshake; flags accumulate as init progresses.
mod status {
    pub const ACKNOWLEDGE: u32 = 1; // Guest has noticed the device
    pub const DRIVER: u32 = 2; // Guest has a driver for the device
    pub const DRIVER_OK: u32 = 4; // Driver is fully set up; device may operate
    pub const FEATURES_OK: u32 = 8; // Feature negotiation done (device must leave it set)
    pub const FAILED: u32 = 128; // Driver has given up on the device
}
94
/// Handle for a single virtio-mmio device.
///
/// Wraps the kernel-virtual base address of a virtio-mmio register region
/// and provides typed read/write accessors. The base address must point to
/// a valid 0x200-byte MMIO region that is mapped in the kernel's address
/// space (identity-mapped on AArch64/RISC-V, or via the physical memory
/// window on x86_64).
///
/// # Safety Invariant
///
/// The `base` address must remain valid and mapped for the lifetime of this
/// struct. All register accesses use volatile reads/writes to prevent the
/// compiler from reordering or eliding MMIO operations.
///
/// `Copy` is intentional: the handle is just an address, so it can be
/// passed by value into the block-device object.
#[derive(Debug, Clone, Copy)]
pub struct VirtioMmioTransport {
    base: usize, // Kernel-virtual base of the device's MMIO register window
}
112
113impl VirtioMmioTransport {
114    pub fn new(base: usize) -> Self {
115        Self { base }
116    }
117
118    #[inline]
119    fn read32(&self, offset: usize) -> u32 {
120        // SAFETY: base + offset is an MMIO region mapped in the kernel's phys window.
121        unsafe { ptr::read_volatile((self.base + offset) as *const u32) }
122    }
123
124    #[inline]
125    fn write32(&self, offset: usize, value: u32) {
126        // SAFETY: base + offset is an MMIO region mapped in the kernel's phys window.
127        unsafe { ptr::write_volatile((self.base + offset) as *mut u32, value) }
128    }
129
130    #[inline]
131    #[allow(dead_code)] // Register-width API completeness
132    fn write16(&self, offset: usize, value: u16) {
133        // SAFETY: base + offset is an MMIO region mapped in the kernel's phys window.
134        unsafe { ptr::write_volatile((self.base + offset) as *mut u16, value) }
135    }
136
137    pub fn matches_blk(&self) -> bool {
138        self.read32(regs::MAGIC) == 0x7472_6976 // "virt"
139            && self.read32(regs::DEVICE_ID) == 2 // 2 = block device
140    }
141
142    pub fn begin_init(&self) {
143        self.write32(regs::STATUS, 0);
144        self.set_status(status::ACKNOWLEDGE | status::DRIVER);
145    }
146
147    fn set_status(&self, bits: u32) {
148        let cur = self.read32(regs::STATUS);
149        self.write32(regs::STATUS, cur | bits);
150    }
151
152    pub fn set_failed(&self) {
153        self.write32(regs::STATUS, status::FAILED);
154    }
155
156    pub fn set_features_ok(&self) -> bool {
157        self.set_status(status::FEATURES_OK);
158        self.read32(regs::STATUS) & status::FEATURES_OK != 0
159    }
160
161    pub fn set_driver_ok(&self) {
162        self.set_status(status::DRIVER_OK);
163    }
164
165    pub fn read_device_features(&self) -> u32 {
166        self.write32(regs::DEVICE_FEATURES_SEL, 0);
167        self.read32(regs::DEVICE_FEATURES)
168    }
169
170    pub fn write_driver_features(&self, features: u32) {
171        self.write32(regs::DRIVER_FEATURES_SEL, 0);
172        self.write32(regs::DRIVER_FEATURES, features);
173    }
174
175    pub fn select_queue(&self, idx: u16) {
176        self.write32(regs::QUEUE_SEL, idx as u32);
177    }
178
179    pub fn read_queue_size_max(&self) -> u16 {
180        self.read32(regs::QUEUE_NUM_MAX) as u16
181    }
182
183    pub fn set_queue_size(&self, size: u16) {
184        self.write32(regs::QUEUE_NUM, size as u32);
185    }
186
187    pub fn set_queue_ready(&self) {
188        self.write32(regs::QUEUE_READY, 1);
189    }
190
191    pub fn write_queue_phys(&self, desc: u64, avail: u64, used: u64) {
192        self.write32(regs::QUEUE_DESC_LOW, desc as u32);
193        self.write32(regs::QUEUE_DESC_HIGH, (desc >> 32) as u32);
194        self.write32(regs::QUEUE_AVAIL_LOW, avail as u32);
195        self.write32(regs::QUEUE_AVAIL_HIGH, (avail >> 32) as u32);
196        self.write32(regs::QUEUE_USED_LOW, used as u32);
197        self.write32(regs::QUEUE_USED_HIGH, (used >> 32) as u32);
198        data_sync_barrier();
199        instruction_sync_barrier();
200    }
201
202    pub fn notify_queue(&self, idx: u16) {
203        self.write32(regs::QUEUE_NOTIFY, idx as u32);
204    }
205
206    pub fn ack_interrupts(&self) {
207        let pending = self.read32(regs::INTERRUPT_STATUS);
208        if pending != 0 {
209            self.write32(regs::INTERRUPT_ACK, pending);
210        }
211    }
212
213    pub fn read_config_u64(&self, offset: usize) -> u64 {
214        let lo = self.read32(regs::STATUS + 0x14 + offset) as u64; // config space follows status+0x14 in legacy mmio
215        let hi = self.read32(regs::STATUS + 0x18 + offset) as u64;
216        (hi << 32) | lo
217    }
218
219    pub fn version(&self) -> u32 {
220        self.read32(regs::VERSION)
221    }
222}
223
224/// Try to initialize a virtio-mmio block device at `base`.
225pub fn try_init_mmio_blk(
226    base: usize,
227) -> Result<crate::drivers::virtio::blk::VirtioBlkDevice, KernelError> {
228    let transport = VirtioMmioTransport::new(base);
229    if !transport.matches_blk() {
230        return Err(KernelError::HardwareError {
231            device: "virtio-blk-mmio",
232            code: 0xdead0001,
233        });
234    }
235
236    // Only handle legacy/modern v1+; QEMU virt reports version 2 (modern). We
237    // use split virtqueues with 64-bit addresses which are supported in v2.
238    let version = transport.version();
239    if version < 1 {
240        return Err(KernelError::HardwareError {
241            device: "virtio-blk-mmio",
242            code: 0xdead0002,
243        });
244    }
245
246    transport.begin_init();
247
248    let device_features = transport.read_device_features();
249    let accepted = device_features
250        & (super::blk::features::VIRTIO_BLK_F_SIZE_MAX
251            | super::blk::features::VIRTIO_BLK_F_SEG_MAX
252            | super::blk::features::VIRTIO_BLK_F_RO
253            | super::blk::features::VIRTIO_BLK_F_BLK_SIZE
254            | super::blk::features::VIRTIO_BLK_F_FLUSH);
255    transport.write_driver_features(accepted);
256
257    if !transport.set_features_ok() {
258        transport.set_failed();
259        return Err(KernelError::HardwareError {
260            device: "virtio-blk-mmio",
261            code: 0xdead0004,
262        });
263    }
264
265    // Queue 0 setup
266    transport.select_queue(0);
267    let qmax = transport.read_queue_size_max();
268    if qmax == 0 {
269        transport.set_failed();
270        return Err(KernelError::HardwareError {
271            device: "virtio-blk-mmio",
272            code: 0xdead0003,
273        });
274    }
275
276    let queue = crate::drivers::virtio::queue::VirtQueue::new(qmax)?;
277    transport.set_queue_size(queue.size());
278    transport.write_queue_phys(queue.phys_desc(), queue.phys_avail(), queue.phys_used());
279    transport.set_queue_ready();
280
281    transport.set_driver_ok();
282
283    let capacity_sectors = transport.read_config_u64(0);
284    let read_only = (accepted & super::blk::features::VIRTIO_BLK_F_RO) != 0;
285
286    crate::println!(
287        "[VIRTIO-BLK/MMIO] Initialized: {} sectors ({} KB) at {:#x}, {}",
288        capacity_sectors,
289        capacity_sectors * super::blk::BLOCK_SIZE as u64 / 1024,
290        base,
291        if read_only { "read-only" } else { "read-write" }
292    );
293
294    Ok(crate::drivers::virtio::blk::VirtioBlkDevice::from_mmio(
295        transport,
296        queue,
297        capacity_sectors,
298        read_only,
299        accepted,
300    ))
301}