1#![allow(dead_code)]
9
10#[cfg(feature = "alloc")]
11extern crate alloc;
12
13#[cfg(feature = "alloc")]
14use alloc::{collections::BTreeMap, vec::Vec};
15
16use super::{vfio::PciAddress, VmError};
17
/// PCIe extended capability ID for SR-IOV (Single Root I/O Virtualization).
const SRIOV_CAP_ID: u16 = 0x0010;

/// Hard cap on how many VFs this implementation will enable at once.
const MAX_VFS: usize = 256;

/// Byte offset of the SR-IOV Capabilities register within the capability structure.
const SRIOV_CAP_OFFSET: u16 = 0x04;
/// Byte offset of the SR-IOV Control register.
const SRIOV_CTRL_OFFSET: u16 = 0x08;
/// Byte offset of the TotalVFs field (max VFs the device supports).
const SRIOV_TOTAL_VFS_OFFSET: u16 = 0x0E;
/// Byte offset of the NumVFs field (VFs currently configured).
const SRIOV_NUM_VFS_OFFSET: u16 = 0x10;
/// Byte offset of the First VF Offset field (routing-ID delta of VF 0).
const SRIOV_VF_OFFSET_OFFSET: u16 = 0x14;
/// Byte offset of the VF Stride field (routing-ID delta between VFs).
const SRIOV_VF_STRIDE_OFFSET: u16 = 0x16;
/// Byte offset of the VF Device ID field.
const SRIOV_VF_DEVICE_ID_OFFSET: u16 = 0x1A;

/// SR-IOV Control register bit: VF Enable.
const SRIOV_CTRL_VF_ENABLE: u16 = 0x0001;
/// SR-IOV Control register bit: VF Migration Enable.
const SRIOV_CTRL_VF_MIGRATION: u16 = 0x0002;
/// SR-IOV Control register bit: ARI Capable Hierarchy.
const SRIOV_CTRL_ARI_CAPABLE: u16 = 0x0010;
41
/// Parsed view of a device's SR-IOV extended capability structure.
#[derive(Debug, Clone, Copy)]
pub struct SriovCapability {
    /// Offset of this capability within PCI extended config space
    /// (stored verbatim from the caller; not read from `data`).
    pub offset: u16,
    /// TotalVFs: maximum number of VFs the device supports.
    pub total_vfs: u16,
    /// NumVFs: number of VFs currently configured on the device.
    pub num_vfs: u16,
    /// First VF Offset: routing-ID offset of VF 0 relative to the PF.
    pub vf_offset: u16,
    /// VF Stride: routing-ID distance between consecutive VFs.
    pub vf_stride: u16,
    /// Device ID presented by the virtual functions.
    pub vf_device_id: u16,
    /// Raw SR-IOV Capabilities register (dword at offset 0x04).
    pub capabilities: u32,
    /// Derived from SR-IOV Control bit 0x0002 (VF Migration Enable).
    /// NOTE(review): the spec places VF Migration *capability* in the
    /// Capabilities register, not Control — confirm intended semantics.
    pub migration_capable: bool,
    /// Derived from SR-IOV Control bit 0x0010 (ARI Capable Hierarchy).
    pub ari_capable: bool,
}
68
69impl SriovCapability {
70 pub fn parse(data: &[u8], offset: u16) -> Result<Self, VmError> {
75 if data.len() < 0x24 {
76 return Err(VmError::DeviceError);
77 }
78
79 let capabilities = u32::from_le_bytes([data[4], data[5], data[6], data[7]]);
80 let ctrl = u16::from_le_bytes([data[8], data[9]]);
81 let total_vfs = u16::from_le_bytes([data[0x0E], data[0x0F]]);
82 let num_vfs = u16::from_le_bytes([data[0x10], data[0x11]]);
83 let vf_offset = u16::from_le_bytes([data[0x14], data[0x15]]);
84 let vf_stride = u16::from_le_bytes([data[0x16], data[0x17]]);
85 let vf_device_id = u16::from_le_bytes([data[0x1A], data[0x1B]]);
86
87 Ok(Self {
88 offset,
89 total_vfs,
90 num_vfs,
91 vf_offset,
92 vf_stride,
93 vf_device_id,
94 capabilities,
95 migration_capable: ctrl & SRIOV_CTRL_VF_MIGRATION != 0,
96 ari_capable: ctrl & SRIOV_CTRL_ARI_CAPABLE != 0,
97 })
98 }
99
100 pub fn vf_address(&self, pf: &PciAddress, vf_index: u16) -> Option<PciAddress> {
102 if vf_index >= self.total_vfs {
103 return None;
104 }
105 let pf_bdf = pf.to_bdf();
106 let vf_bdf = pf_bdf
107 .checked_add(self.vf_offset)?
108 .checked_add(self.vf_stride.checked_mul(vf_index)?)?;
109 Some(PciAddress::from_bdf(vf_bdf))
110 }
111
112 pub fn cap_id() -> u16 {
114 SRIOV_CAP_ID
115 }
116}
117
/// Lifecycle state of a single virtual function.
#[derive(Debug, Clone, Copy, PartialEq, Eq, Default)]
pub enum VfState {
    /// VF is not enabled; it cannot be assigned to a VM.
    #[default]
    Disabled,
    /// VF is enabled and available for assignment.
    Enabled,
    /// VF is enabled and currently assigned to a VM.
    Assigned,
}
133
/// Runtime record for one SR-IOV virtual function.
#[derive(Debug, Clone, Copy)]
pub struct VirtualFunction {
    /// Zero-based index of this VF under its physical function.
    pub vf_index: u16,
    /// PCI address computed from the PF address plus VF offset/stride.
    pub pci_address: PciAddress,
    /// Whether the VF is enabled (kept in sync with `state`).
    pub enabled: bool,
    /// ID of the VM this VF is assigned to, if any.
    pub assigned_vm: Option<u32>,
    /// Current lifecycle state; mirrors `enabled`/`assigned_vm`.
    pub state: VfState,
}
148
149impl VirtualFunction {
150 pub fn new(vf_index: u16, pci_address: PciAddress) -> Self {
152 Self {
153 vf_index,
154 pci_address,
155 enabled: false,
156 assigned_vm: None,
157 state: VfState::Disabled,
158 }
159 }
160
161 pub fn enable(&mut self) {
163 self.enabled = true;
164 self.state = VfState::Enabled;
165 }
166
167 pub fn disable(&mut self) {
169 self.enabled = false;
170 self.assigned_vm = None;
171 self.state = VfState::Disabled;
172 }
173
174 pub fn assign(&mut self, vm_id: u32) -> Result<(), VmError> {
176 if !self.enabled {
177 return Err(VmError::DeviceError);
178 }
179 if self.assigned_vm.is_some() {
180 return Err(VmError::DeviceError);
181 }
182 self.assigned_vm = Some(vm_id);
183 self.state = VfState::Assigned;
184 Ok(())
185 }
186
187 pub fn unassign(&mut self) {
189 self.assigned_vm = None;
190 if self.enabled {
191 self.state = VfState::Enabled;
192 } else {
193 self.state = VfState::Disabled;
194 }
195 }
196
197 pub fn is_available(&self) -> bool {
199 self.enabled && self.assigned_vm.is_none()
200 }
201}
202
#[cfg(feature = "alloc")]
/// An SR-IOV-capable physical function together with its managed VFs.
pub struct SriovDevice {
    /// PCI address of the physical function.
    pub pf_address: PciAddress,
    /// Parsed SR-IOV capability of the physical function.
    pub capability: SriovCapability,
    /// VF records; populated by `enable_vfs`, emptied by `disable_vfs`.
    pub vfs: Vec<VirtualFunction>,
    /// True while the VF set in `vfs` is enabled.
    pub vfs_enabled: bool,
}
219
#[cfg(feature = "alloc")]
impl SriovDevice {
    /// Wraps a parsed SR-IOV `capability` for the physical function at
    /// `pf_address`. No VFs are enabled initially.
    pub fn new(pf_address: PciAddress, capability: SriovCapability) -> Self {
        Self {
            pf_address,
            capability,
            vfs: Vec::new(),
            vfs_enabled: false,
        }
    }

    /// Parses the SR-IOV capability from raw config-space bytes and
    /// builds the device in one step.
    ///
    /// # Errors
    ///
    /// Propagates `VmError::DeviceError` from [`SriovCapability::parse`]
    /// when `data` is too short.
    pub fn parse_capability(
        pf_address: PciAddress,
        data: &[u8],
        offset: u16,
    ) -> Result<Self, VmError> {
        let cap = SriovCapability::parse(data, offset)?;
        Ok(Self::new(pf_address, cap))
    }

    /// Enables `num_vfs` virtual functions, creating one record per VF.
    ///
    /// # Errors
    ///
    /// * `VmError::DeviceError` if `num_vfs` exceeds the device's
    ///   TotalVFs or `MAX_VFS`, or if a VF address cannot be computed.
    /// * `VmError::VmxAlreadyEnabled` if VFs are already enabled.
    pub fn enable_vfs(&mut self, num_vfs: u16) -> Result<(), VmError> {
        if num_vfs > self.capability.total_vfs || num_vfs as usize > MAX_VFS {
            return Err(VmError::DeviceError);
        }
        if self.vfs_enabled {
            return Err(VmError::VmxAlreadyEnabled);
        }

        // Build the complete VF set before mutating `self`, so a failed
        // address computation leaves the device state untouched
        // (previously a mid-loop failure left `self.vfs` partially
        // populated while `vfs_enabled` stayed false).
        let mut vfs = Vec::with_capacity(num_vfs as usize);
        for i in 0..num_vfs {
            let vf_addr = self
                .capability
                .vf_address(&self.pf_address, i)
                .ok_or(VmError::DeviceError)?;
            let mut vf = VirtualFunction::new(i, vf_addr);
            vf.enable();
            vfs.push(vf);
        }
        self.vfs = vfs;
        self.capability.num_vfs = num_vfs;
        self.vfs_enabled = true;
        Ok(())
    }

    /// Disables and discards every VF and resets NumVFs to zero.
    pub fn disable_vfs(&mut self) {
        // Run each record through `disable()` so VM assignments are
        // cleared before the records are dropped.
        for vf in &mut self.vfs {
            vf.disable();
        }
        self.vfs.clear();
        self.capability.num_vfs = 0;
        self.vfs_enabled = false;
    }

    /// Assigns VF `vf_index` to VM `vm_id`.
    ///
    /// # Errors
    ///
    /// `VmError::DeviceError` if no such VF exists, the VF is disabled,
    /// or it is already assigned.
    pub fn assign_vf(&mut self, vf_index: u16, vm_id: u32) -> Result<(), VmError> {
        self.vf_mut(vf_index)
            .ok_or(VmError::DeviceError)?
            .assign(vm_id)
    }

    /// Releases VF `vf_index` from its VM, if any.
    ///
    /// # Errors
    ///
    /// `VmError::DeviceError` if no such VF exists.
    pub fn unassign_vf(&mut self, vf_index: u16) -> Result<(), VmError> {
        self.vf_mut(vf_index)
            .ok_or(VmError::DeviceError)?
            .unassign();
        Ok(())
    }

    /// Returns the VF record with the given index, if present.
    pub fn vf(&self, vf_index: u16) -> Option<&VirtualFunction> {
        self.vfs.iter().find(|v| v.vf_index == vf_index)
    }

    /// Mutable lookup by VF index, shared by `assign_vf`/`unassign_vf`.
    fn vf_mut(&mut self, vf_index: u16) -> Option<&mut VirtualFunction> {
        self.vfs.iter_mut().find(|v| v.vf_index == vf_index)
    }

    /// Number of VFs currently enabled.
    pub fn num_enabled_vfs(&self) -> usize {
        self.vfs.iter().filter(|v| v.enabled).count()
    }

    /// Number of VFs currently assigned to a VM.
    pub fn num_assigned_vfs(&self) -> usize {
        self.vfs.iter().filter(|v| v.assigned_vm.is_some()).count()
    }

    /// Indices of VFs that are enabled and not yet assigned.
    pub fn available_vfs(&self) -> Vec<u16> {
        self.vfs
            .iter()
            .filter(|v| v.is_available())
            .map(|v| v.vf_index)
            .collect()
    }
}
321
#[cfg(feature = "alloc")]
/// Registry of all discovered SR-IOV devices.
pub struct SriovManager {
    // Devices keyed by the physical function's 16-bit BDF.
    devices: BTreeMap<u16, SriovDevice>,
}
332
#[cfg(feature = "alloc")]
impl Default for SriovManager {
    /// Equivalent to [`SriovManager::new`]: an empty registry.
    fn default() -> Self {
        Self {
            devices: BTreeMap::new(),
        }
    }
}
339
#[cfg(feature = "alloc")]
impl SriovManager {
    /// Creates an empty registry with no discovered devices.
    pub fn new() -> Self {
        Self {
            devices: BTreeMap::new(),
        }
    }

    /// Registers `device`, replacing any prior entry with the same PF BDF.
    pub fn discover(&mut self, device: SriovDevice) {
        self.devices.insert(device.pf_address.to_bdf(), device);
    }

    /// Looks up a device by its physical function address.
    pub fn get_device(&self, pf: &PciAddress) -> Option<&SriovDevice> {
        self.devices.get(&pf.to_bdf())
    }

    /// Mutable lookup by physical function address.
    pub fn get_device_mut(&mut self, pf: &PciAddress) -> Option<&mut SriovDevice> {
        self.devices.get_mut(&pf.to_bdf())
    }

    /// Every VF across every device, each paired with its PF address.
    pub fn list_vfs(&self) -> Vec<(PciAddress, &VirtualFunction)> {
        self.devices
            .values()
            .flat_map(|dev| dev.vfs.iter().map(move |vf| (dev.pf_address, vf)))
            .collect()
    }

    /// Number of registered SR-IOV devices.
    pub fn device_count(&self) -> usize {
        self.devices.len()
    }

    /// Total enabled VFs across all registered devices.
    pub fn total_vfs(&self) -> usize {
        self.devices.values().map(SriovDevice::num_enabled_vfs).sum()
    }

    /// Total VFs currently assigned to a VM, across all devices.
    pub fn total_assigned_vfs(&self) -> usize {
        self.devices.values().map(SriovDevice::num_assigned_vfs).sum()
    }
}
391
#[cfg(test)]
mod tests {
    use super::*;

    /// Builds a minimal 0x24-byte SR-IOV capability image with the given
    /// TotalVFs, First VF Offset, VF Stride, and VF Device ID fields
    /// encoded little-endian at the offsets `SriovCapability::parse` reads.
    fn make_sriov_config(
        total_vfs: u16,
        vf_offset: u16,
        vf_stride: u16,
        vf_dev_id: u16,
    ) -> [u8; 0x24] {
        let mut data = [0u8; 0x24];
        // Capabilities register low byte set; Control register left zero.
        data[4] = 0x01; data[8] = 0x00;
        data[0x0E] = total_vfs as u8;
        data[0x0F] = (total_vfs >> 8) as u8;
        // NumVFs starts at zero.
        data[0x10] = 0;
        data[0x14] = vf_offset as u8;
        data[0x15] = (vf_offset >> 8) as u8;
        data[0x16] = vf_stride as u8;
        data[0x17] = (vf_stride >> 8) as u8;
        data[0x1A] = vf_dev_id as u8;
        data[0x1B] = (vf_dev_id >> 8) as u8;
        data
    }

    // Every parsed field matches what make_sriov_config encoded.
    #[test]
    fn test_sriov_capability_parse() {
        let config = make_sriov_config(8, 1, 1, 0x1234);
        let cap = SriovCapability::parse(&config, 0x100).unwrap();
        assert_eq!(cap.total_vfs, 8);
        assert_eq!(cap.vf_offset, 1);
        assert_eq!(cap.vf_stride, 1);
        assert_eq!(cap.vf_device_id, 0x1234);
        assert_eq!(cap.offset, 0x100);
    }

    // Buffers shorter than 0x24 bytes are rejected.
    #[test]
    fn test_sriov_capability_parse_too_short() {
        let data = [0u8; 10];
        assert!(SriovCapability::parse(&data, 0).is_err());
    }

    // VF BDF = PF BDF + offset + index * stride.
    #[test]
    fn test_sriov_vf_address() {
        let config = make_sriov_config(4, 2, 1, 0);
        let cap = SriovCapability::parse(&config, 0).unwrap();
        let pf = PciAddress::new(0, 3, 0);
        let vf0 = cap.vf_address(&pf, 0).unwrap();
        assert_eq!(vf0.to_bdf(), pf.to_bdf() + 2);
        let vf1 = cap.vf_address(&pf, 1).unwrap();
        assert_eq!(vf1.to_bdf(), pf.to_bdf() + 3);
    }

    // Full Disabled -> Enabled -> Assigned -> Enabled -> Disabled cycle,
    // with availability tracking at each step.
    #[test]
    fn test_virtual_function_lifecycle() {
        let mut vf = VirtualFunction::new(0, PciAddress::new(0, 3, 2));
        assert_eq!(vf.state, VfState::Disabled);
        assert!(!vf.is_available());

        vf.enable();
        assert_eq!(vf.state, VfState::Enabled);
        assert!(vf.is_available());

        vf.assign(1).unwrap();
        assert_eq!(vf.state, VfState::Assigned);
        assert!(!vf.is_available());
        assert_eq!(vf.assigned_vm, Some(1));

        vf.unassign();
        assert_eq!(vf.state, VfState::Enabled);
        assert!(vf.is_available());

        vf.disable();
        assert_eq!(vf.state, VfState::Disabled);
    }

    // Assigning a disabled VF fails.
    #[test]
    fn test_vf_assign_disabled() {
        let mut vf = VirtualFunction::new(0, PciAddress::new(0, 0, 0));
        assert!(vf.assign(1).is_err());
    }

    // A VF already bound to a VM cannot be re-assigned.
    #[test]
    fn test_vf_double_assign() {
        let mut vf = VirtualFunction::new(0, PciAddress::new(0, 0, 0));
        vf.enable();
        vf.assign(1).unwrap();
        assert!(vf.assign(2).is_err());
    }

    // Enabling a subset of TotalVFs populates that many enabled records.
    #[test]
    fn test_sriov_device_enable_vfs() {
        let config = make_sriov_config(8, 1, 1, 0x5678);
        let cap = SriovCapability::parse(&config, 0).unwrap();
        let pf = PciAddress::new(0, 5, 0);
        let mut dev = SriovDevice::new(pf, cap);

        dev.enable_vfs(4).unwrap();
        assert_eq!(dev.num_enabled_vfs(), 4);
        assert!(dev.vfs_enabled);
    }

    // Requesting more VFs than TotalVFs is rejected.
    #[test]
    fn test_sriov_device_enable_too_many() {
        let config = make_sriov_config(4, 1, 1, 0);
        let cap = SriovCapability::parse(&config, 0).unwrap();
        let mut dev = SriovDevice::new(PciAddress::new(0, 0, 0), cap);
        assert!(dev.enable_vfs(5).is_err());
    }

    // disable_vfs discards all records and clears the enabled flag.
    #[test]
    fn test_sriov_device_disable_vfs() {
        let config = make_sriov_config(8, 1, 1, 0);
        let cap = SriovCapability::parse(&config, 0).unwrap();
        let mut dev = SriovDevice::new(PciAddress::new(0, 0, 0), cap);
        dev.enable_vfs(4).unwrap();
        dev.disable_vfs();
        assert_eq!(dev.num_enabled_vfs(), 0);
        assert!(!dev.vfs_enabled);
    }

    // assign_vf binds the VF and is visible via vf()/num_assigned_vfs().
    #[test]
    fn test_sriov_device_assign_vf() {
        let config = make_sriov_config(8, 1, 1, 0);
        let cap = SriovCapability::parse(&config, 0).unwrap();
        let mut dev = SriovDevice::new(PciAddress::new(0, 0, 0), cap);
        dev.enable_vfs(4).unwrap();
        dev.assign_vf(0, 42).unwrap();
        assert_eq!(dev.num_assigned_vfs(), 1);
        assert_eq!(dev.vf(0).unwrap().assigned_vm, Some(42));
    }

    // unassign_vf returns the VF to the available pool.
    #[test]
    fn test_sriov_device_unassign_vf() {
        let config = make_sriov_config(8, 1, 1, 0);
        let cap = SriovCapability::parse(&config, 0).unwrap();
        let mut dev = SriovDevice::new(PciAddress::new(0, 0, 0), cap);
        dev.enable_vfs(4).unwrap();
        dev.assign_vf(1, 10).unwrap();
        dev.unassign_vf(1).unwrap();
        assert!(dev.vf(1).unwrap().is_available());
    }

    // Manager aggregates: device count, VF totals, and list_vfs length.
    #[test]
    fn test_sriov_manager() {
        let config = make_sriov_config(4, 1, 1, 0);
        let cap = SriovCapability::parse(&config, 0).unwrap();
        let pf = PciAddress::new(0, 5, 0);
        let mut dev = SriovDevice::new(pf, cap);
        dev.enable_vfs(2).unwrap();

        let mut mgr = SriovManager::new();
        mgr.discover(dev);
        assert_eq!(mgr.device_count(), 1);
        assert_eq!(mgr.total_vfs(), 2);
        assert_eq!(mgr.total_assigned_vfs(), 0);

        let vfs = mgr.list_vfs();
        assert_eq!(vfs.len(), 2);
    }
}