#![allow(dead_code)]
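//! Device multiplexing and live-migration support: port I/O and MMIO
//! dispatch, VM and device state snapshots, a serialized migration stream
//! format, dirty-page tracking, and a pre-copy migration driver.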

#[cfg(feature = "alloc")]
extern crate alloc;

#[cfg(feature = "alloc")]
use alloc::{collections::BTreeMap, string::String, vec, vec::Vec};

use super::VmError;
/// Magic number identifying the migration stream format.
const MIGRATION_MAGIC: u32 = 0x5145_4D49;

/// Version of the migration stream format; any other version is rejected.
const MIGRATION_VERSION: u32 = 3;

/// Guest page size in bytes.
const PAGE_SIZE: u64 = 4096;

/// Bits in one dirty-bitmap word.
const BITS_PER_WORD: u64 = 64;

/// Maximum number of registered port I/O handlers.
const MAX_IO_HANDLERS: usize = 256;

/// Maximum number of registered MMIO handlers.
const MAX_MMIO_HANDLERS: usize = 128;

/// Default dirty-page count at or below which pre-copy may hand off to stop-and-copy.
const DEFAULT_DIRTY_THRESHOLD: u64 = 256;

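/// Interface a device model implements so the VMM can route port I/O and MMIO
/// accesses to it and capture or restore its state during migration.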
#[cfg(feature = "alloc")] // gated because `DeviceState` is only defined with `alloc`
pub trait DeviceModelInterface: Send {
    /// Handles a port I/O access; on a read (`is_write == false`) the device fills `data`.
    fn handle_io(&mut self, port: u16, is_write: bool, data: &mut [u8]) -> Result<(), VmError>;

    /// Handles an MMIO access at guest-physical address `addr`.
    fn handle_mmio(&mut self, addr: u64, is_write: bool, data: &mut [u8]) -> Result<(), VmError>;

    /// Captures the device's state for migration.
    fn get_state(&self) -> DeviceState;

    /// Restores the device's state from a migration snapshot.
    fn set_state(&mut self, state: &DeviceState) -> Result<(), VmError>;
}

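/// A registered port I/O range owned by a single device.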
#[derive(Debug, Clone, Copy)]
pub struct IoHandler {
    /// First port of the handled range.
    pub port_start: u16,
    /// Number of consecutive ports handled.
    pub port_count: u16,
    /// Identifier of the owning device.
    pub device_id: u32,
}

impl IoHandler {
    pub fn new(port_start: u16, port_count: u16, device_id: u32) -> Self {
        Self {
            port_start,
            port_count,
            device_id,
        }
    }

    pub(crate) fn contains_port(&self, port: u16) -> bool {
        port >= self.port_start && (port as u32) < self.port_end()
    }

    /// One past the last handled port, widened to u32 so the sum cannot overflow u16.
    pub(crate) fn port_end(&self) -> u32 {
        self.port_start as u32 + self.port_count as u32
    }

    /// Returns the offset of `port` within the range, if it is handled here.
    pub(crate) fn dispatch_io(&self, port: u16) -> Option<u16> {
        if self.contains_port(port) {
            Some(port - self.port_start)
        } else {
            None
        }
    }
}

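/// A registered MMIO region owned by a single device.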
#[derive(Debug, Clone, Copy)]
pub struct MmioHandler {
    /// Guest-physical base address of the region.
    pub base_addr: u64,
    /// Region size in bytes.
    pub size: u64,
    /// Identifier of the owning device.
    pub device_id: u32,
}

impl MmioHandler {
    pub fn new(base_addr: u64, size: u64, device_id: u32) -> Self {
        Self {
            base_addr,
            size,
            device_id,
        }
    }

    pub(crate) fn contains_addr(&self, addr: u64) -> bool {
        // Compare via subtraction so `base_addr + size` cannot overflow u64.
        addr >= self.base_addr && addr - self.base_addr < self.size
    }

    /// One past the last address of the region, saturating at `u64::MAX`.
    pub(crate) fn addr_end(&self) -> u64 {
        self.base_addr.saturating_add(self.size)
    }

    /// Returns the offset of `addr` within the region, if it is handled here.
    pub(crate) fn dispatch_mmio(&self, addr: u64) -> Option<u64> {
        if self.contains_addr(addr) {
            Some(addr - self.base_addr)
        } else {
            None
        }
    }
}

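/// Routes port I/O and MMIO accesses to registered handlers. Handlers are
/// keyed by range start, so lookups walk the ranges in address order.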
#[cfg(feature = "alloc")]
pub struct DeviceMultiplexer {
    /// Port I/O handlers keyed by their first port.
    io_handlers: BTreeMap<u16, IoHandler>,
    /// MMIO handlers keyed by their base address.
    mmio_handlers: BTreeMap<u64, MmioHandler>,
}

#[cfg(feature = "alloc")]
impl Default for DeviceMultiplexer {
    fn default() -> Self {
        Self::new()
    }
}

#[cfg(feature = "alloc")]
impl DeviceMultiplexer {
    pub fn new() -> Self {
        Self {
            io_handlers: BTreeMap::new(),
            mmio_handlers: BTreeMap::new(),
        }
    }

    pub(crate) fn register_io(&mut self, handler: IoHandler) -> Result<(), VmError> {
        if self.io_handlers.len() >= MAX_IO_HANDLERS {
            return Err(VmError::DeviceError);
        }

        // Reject any range that overlaps an existing registration.
        for existing in self.io_handlers.values() {
            if (handler.port_start as u32) < existing.port_end()
                && handler.port_end() > existing.port_start as u32
            {
                return Err(VmError::DeviceError);
            }
        }

        self.io_handlers.insert(handler.port_start, handler);
        Ok(())
    }

    pub(crate) fn register_mmio(&mut self, handler: MmioHandler) -> Result<(), VmError> {
        if self.mmio_handlers.len() >= MAX_MMIO_HANDLERS {
            return Err(VmError::DeviceError);
        }

        // Reject any region that overlaps an existing registration.
        for existing in self.mmio_handlers.values() {
            if handler.base_addr < existing.addr_end() && handler.addr_end() > existing.base_addr {
                return Err(VmError::DeviceError);
            }
        }

        self.mmio_handlers.insert(handler.base_addr, handler);
        Ok(())
    }

    /// Resolves a port access to `(device_id, offset_within_range)`.
    pub(crate) fn dispatch_io(&self, port: u16) -> Option<(u32, u16)> {
        for handler in self.io_handlers.values() {
            if let Some(offset) = handler.dispatch_io(port) {
                return Some((handler.device_id, offset));
            }
        }
        None
    }

    /// Resolves an MMIO access to `(device_id, offset_within_region)`.
    pub(crate) fn dispatch_mmio(&self, addr: u64) -> Option<(u32, u64)> {
        for handler in self.mmio_handlers.values() {
            if let Some(offset) = handler.dispatch_mmio(addr) {
                return Some((handler.device_id, offset));
            }
        }
        None
    }

    pub(crate) fn unregister_io(&mut self, port_start: u16) -> bool {
        self.io_handlers.remove(&port_start).is_some()
    }

    pub(crate) fn unregister_mmio(&mut self, base_addr: u64) -> bool {
        self.mmio_handlers.remove(&base_addr).is_some()
    }

    pub(crate) fn io_handler_count(&self) -> usize {
        self.io_handlers.len()
    }

    pub(crate) fn mmio_handler_count(&self) -> usize {
        self.mmio_handlers.len()
    }
}

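/// Serialized state of one device: a name plus an opaque byte buffer with
/// little-endian, length-prefixed fields.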
#[cfg(feature = "alloc")]
#[derive(Debug, Clone)]
pub struct DeviceState {
    /// Name identifying the device (e.g. "uart").
    pub device_name: String,
    /// Serialized state bytes.
    pub data: Vec<u8>,
}

#[cfg(feature = "alloc")]
impl DeviceState {
    pub fn new(name: &str) -> Self {
        Self {
            device_name: String::from(name),
            data: Vec::new(),
        }
    }

    pub fn with_data(name: &str, data: Vec<u8>) -> Self {
        Self {
            device_name: String::from(name),
            data,
        }
    }

    pub(crate) fn write_u32(&mut self, value: u32) {
        self.data.extend_from_slice(&value.to_le_bytes());
    }

    pub(crate) fn write_u64(&mut self, value: u64) {
        self.data.extend_from_slice(&value.to_le_bytes());
    }

    /// Writes a u32 length prefix followed by the bytes themselves.
    pub(crate) fn write_bytes(&mut self, bytes: &[u8]) {
        self.write_u32(bytes.len() as u32);
        self.data.extend_from_slice(bytes);
    }

    pub(crate) fn read_u32(&self, offset: &mut usize) -> Option<u32> {
        let end = offset.checked_add(4)?;
        let bytes: [u8; 4] = self.data.get(*offset..end)?.try_into().ok()?;
        *offset = end;
        Some(u32::from_le_bytes(bytes))
    }

    pub(crate) fn read_u64(&self, offset: &mut usize) -> Option<u64> {
        let end = offset.checked_add(8)?;
        let bytes: [u8; 8] = self.data.get(*offset..end)?.try_into().ok()?;
        *offset = end;
        Some(u64::from_le_bytes(bytes))
    }

    pub(crate) fn read_bytes(&self, offset: &mut usize) -> Option<Vec<u8>> {
        let len = self.read_u32(offset)? as usize;
        let end = offset.checked_add(len)?;
        let data = self.data.get(*offset..end)?.to_vec();
        *offset = end;
        Some(data)
    }

    /// Size of this entry in a migration stream: a u32 name length, the name,
    /// a u32 data length, and the data (see `serialize_state`).
    pub(crate) fn serialized_size(&self) -> usize {
        8 + self.device_name.len() + self.data.len()
    }
}

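/// Snapshot of whole-VM state (vCPU registers and memory metadata) captured
/// for migration.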
#[cfg(feature = "alloc")]
#[derive(Debug, Clone, Default)]
pub struct VmState {
    /// Serialized register state, one buffer per vCPU.
    pub vcpu_regs: Vec<Vec<u8>>,
    /// Hash of guest memory contents at capture time.
    pub memory_hash: u64,
    /// Guest memory size in bytes.
    pub memory_size: u64,
    /// Number of vCPUs.
    pub num_vcpus: u32,
}

#[cfg(feature = "alloc")]
impl VmState {
    pub fn new(num_vcpus: u32, memory_size: u64) -> Self {
        Self {
            vcpu_regs: Vec::with_capacity(num_vcpus as usize),
            memory_hash: 0,
            memory_size,
            num_vcpus,
        }
    }

    pub(crate) fn add_vcpu_state(&mut self, regs: Vec<u8>) {
        self.vcpu_regs.push(regs);
    }

    pub(crate) fn set_memory_hash(&mut self, hash: u64) {
        self.memory_hash = hash;
    }
}

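/// Fixed 24-byte header at the start of every migration stream, stored as six
/// little-endian u32 fields.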
#[derive(Debug, Clone, Copy)]
pub struct MigrationHeader {
    /// Must equal `MIGRATION_MAGIC`.
    pub magic: u32,
    /// Must equal `MIGRATION_VERSION`.
    pub version: u32,
    /// Size in bytes of the serialized VM section.
    pub vm_state_size: u32,
    /// Size in bytes of the serialized device section.
    pub device_state_size: u32,
    /// Number of device entries in the stream.
    pub device_count: u32,
    /// Reserved flag bits; currently always zero.
    pub flags: u32,
}

impl MigrationHeader {
    pub fn new(vm_state_size: u32, device_state_size: u32, device_count: u32) -> Self {
        Self {
            magic: MIGRATION_MAGIC,
            version: MIGRATION_VERSION,
            vm_state_size,
            device_state_size,
            device_count,
            flags: 0,
        }
    }

    pub(crate) fn validate(&self) -> Result<(), VmError> {
        if self.magic != MIGRATION_MAGIC {
            return Err(VmError::InvalidVmState);
        }
        if self.version != MIGRATION_VERSION {
            return Err(VmError::InvalidVmState);
        }
        Ok(())
    }

    #[cfg(feature = "alloc")]
    pub fn to_bytes(&self) -> Vec<u8> {
        // Six little-endian u32 fields: 24 bytes total.
        let mut buf = Vec::with_capacity(24);
        buf.extend_from_slice(&self.magic.to_le_bytes());
        buf.extend_from_slice(&self.version.to_le_bytes());
        buf.extend_from_slice(&self.vm_state_size.to_le_bytes());
        buf.extend_from_slice(&self.device_state_size.to_le_bytes());
        buf.extend_from_slice(&self.device_count.to_le_bytes());
        buf.extend_from_slice(&self.flags.to_le_bytes());
        buf
    }

    pub fn from_bytes(data: &[u8]) -> Result<Self, VmError> {
        if data.len() < 24 {
            return Err(VmError::InvalidVmState);
        }
        let magic = u32::from_le_bytes([data[0], data[1], data[2], data[3]]);
        let version = u32::from_le_bytes([data[4], data[5], data[6], data[7]]);
        let vm_state_size = u32::from_le_bytes([data[8], data[9], data[10], data[11]]);
        let device_state_size = u32::from_le_bytes([data[12], data[13], data[14], data[15]]);
        let device_count = u32::from_le_bytes([data[16], data[17], data[18], data[19]]);
        let flags = u32::from_le_bytes([data[20], data[21], data[22], data[23]]);

        let header = Self {
            magic,
            version,
            vm_state_size,
            device_state_size,
            device_count,
            flags,
        };
        header.validate()?;
        Ok(header)
    }
}

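/// Serializes VM and device state into one stream: a `MigrationHeader`, then
/// the VM section (vCPU count, memory size, memory hash, length-prefixed
/// per-vCPU registers), then length-prefixed device entries.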
#[cfg(feature = "alloc")]
pub(crate) fn serialize_state(vm_state: &VmState, device_states: &[DeviceState]) -> Vec<u8> {
    let mut vm_data = Vec::new();
    vm_data.extend_from_slice(&vm_state.num_vcpus.to_le_bytes());
    vm_data.extend_from_slice(&vm_state.memory_size.to_le_bytes());
    vm_data.extend_from_slice(&vm_state.memory_hash.to_le_bytes());
    for vcpu_regs in &vm_state.vcpu_regs {
        vm_data.extend_from_slice(&(vcpu_regs.len() as u32).to_le_bytes());
        vm_data.extend_from_slice(vcpu_regs);
    }

    let mut device_data = Vec::new();
    for ds in device_states {
        device_data.extend_from_slice(&(ds.device_name.len() as u32).to_le_bytes());
        device_data.extend_from_slice(ds.device_name.as_bytes());
        device_data.extend_from_slice(&(ds.data.len() as u32).to_le_bytes());
        device_data.extend_from_slice(&ds.data);
    }

    let header = MigrationHeader::new(
        vm_data.len() as u32,
        device_data.len() as u32,
        device_states.len() as u32,
    );

    let mut stream = header.to_bytes();
    stream.extend_from_slice(&vm_data);
    stream.extend_from_slice(&device_data);
    stream
}

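/// Parses a stream produced by `serialize_state`, validating the header and
/// bounds-checking every section before reading it.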
#[cfg(feature = "alloc")]
pub(crate) fn deserialize_state(data: &[u8]) -> Result<(VmState, Vec<DeviceState>), VmError> {
    let header = MigrationHeader::from_bytes(data)?;

    // The fixed 24-byte header is followed by the VM section, then the devices.
    let vm_offset = 24;
    let device_offset = vm_offset + header.vm_state_size as usize;

    if data.len() < device_offset + header.device_state_size as usize {
        return Err(VmError::InvalidVmState);
    }

    let vm_data = &data[vm_offset..device_offset];
    // num_vcpus (4 bytes) + memory_size (8) + memory_hash (8).
    if vm_data.len() < 20 {
        return Err(VmError::InvalidVmState);
    }

    let num_vcpus = u32::from_le_bytes([vm_data[0], vm_data[1], vm_data[2], vm_data[3]]);
    let memory_size = u64::from_le_bytes([
        vm_data[4],
        vm_data[5],
        vm_data[6],
        vm_data[7],
        vm_data[8],
        vm_data[9],
        vm_data[10],
        vm_data[11],
    ]);
    let memory_hash = u64::from_le_bytes([
        vm_data[12],
        vm_data[13],
        vm_data[14],
        vm_data[15],
        vm_data[16],
        vm_data[17],
        vm_data[18],
        vm_data[19],
    ]);

    let mut vm_state = VmState::new(num_vcpus, memory_size);
    vm_state.set_memory_hash(memory_hash);

    // Each vCPU entry is a u32 length followed by that many register bytes;
    // a truncated entry ends the loop instead of erroring.
    let mut pos = 20;
    for _ in 0..num_vcpus {
        if pos + 4 > vm_data.len() {
            break;
        }
        let len = u32::from_le_bytes([
            vm_data[pos],
            vm_data[pos + 1],
            vm_data[pos + 2],
            vm_data[pos + 3],
        ]) as usize;
        pos += 4;
        if pos + len > vm_data.len() {
            break;
        }
        vm_state.add_vcpu_state(vm_data[pos..pos + len].to_vec());
        pos += len;
    }

    let dev_data = &data[device_offset..device_offset + header.device_state_size as usize];
    let mut device_states = Vec::new();
    let mut dpos = 0;

    // Each device entry: u32 name length, name bytes, u32 data length, data.
    for _ in 0..header.device_count {
        if dpos + 4 > dev_data.len() {
            break;
        }
        let name_len = u32::from_le_bytes([
            dev_data[dpos],
            dev_data[dpos + 1],
            dev_data[dpos + 2],
            dev_data[dpos + 3],
        ]) as usize;
        dpos += 4;

        if dpos + name_len > dev_data.len() {
            break;
        }
        let name = core::str::from_utf8(&dev_data[dpos..dpos + name_len]).unwrap_or("unknown");
        dpos += name_len;

        if dpos + 4 > dev_data.len() {
            break;
        }
        let data_len = u32::from_le_bytes([
            dev_data[dpos],
            dev_data[dpos + 1],
            dev_data[dpos + 2],
            dev_data[dpos + 3],
        ]) as usize;
        dpos += 4;

        if dpos + data_len > dev_data.len() {
            break;
        }
        let state_data = dev_data[dpos..dpos + data_len].to_vec();
        dpos += data_len;

        device_states.push(DeviceState::with_data(name, state_data));
    }

    Ok((vm_state, device_states))
}

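/// Bitmap recording which guest pages were written since the last clear; it
/// decides what each pre-copy round must re-send.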
#[cfg(feature = "alloc")]
pub struct DirtyPageTracker {
    /// One bit per page; a set bit means the page is dirty.
    bitmap: Vec<u64>,
    /// Page size in bytes.
    page_size: u64,
    /// Total number of tracked pages.
    num_pages: u64,
    /// Number of bits currently set in `bitmap`.
    dirty_count: u64,
}

#[cfg(feature = "alloc")]
impl DirtyPageTracker {
    pub fn new(memory_size: u64, page_size: u64) -> Self {
        // Guard against a zero page size rather than dividing by zero.
        let ps = if page_size == 0 { PAGE_SIZE } else { page_size };
        let num_pages = memory_size.div_ceil(ps);
        let bitmap_words = num_pages.div_ceil(BITS_PER_WORD) as usize;
        Self {
            bitmap: vec![0u64; bitmap_words],
            page_size: ps,
            num_pages,
            dirty_count: 0,
        }
    }

    pub(crate) fn mark_dirty(&mut self, page_num: u64) {
        if page_num >= self.num_pages {
            return;
        }
        let word = (page_num / BITS_PER_WORD) as usize;
        let bit = page_num % BITS_PER_WORD;
        if word < self.bitmap.len() {
            let mask = 1u64 << bit;
            // Only count the first transition to dirty.
            if self.bitmap[word] & mask == 0 {
                self.bitmap[word] |= mask;
                self.dirty_count = self.dirty_count.saturating_add(1);
            }
        }
    }

    pub(crate) fn mark_dirty_addr(&mut self, addr: u64) {
        self.mark_dirty(addr / self.page_size);
    }

    pub(crate) fn is_dirty(&self, page_num: u64) -> bool {
        if page_num >= self.num_pages {
            return false;
        }
        let word = (page_num / BITS_PER_WORD) as usize;
        let bit = page_num % BITS_PER_WORD;
        if word < self.bitmap.len() {
            self.bitmap[word] & (1u64 << bit) != 0
        } else {
            false
        }
    }

    /// Returns the dirty page numbers in ascending order.
    pub(crate) fn get_dirty_pages(&self) -> Vec<u64> {
        let mut pages = Vec::new();
        for (word_idx, &word) in self.bitmap.iter().enumerate() {
            if word == 0 {
                continue;
            }
            for bit in 0..BITS_PER_WORD {
                if word & (1u64 << bit) != 0 {
                    let page = word_idx as u64 * BITS_PER_WORD + bit;
                    if page < self.num_pages {
                        pages.push(page);
                    }
                }
            }
        }
        pages
    }

    /// Returns the dirty pages and resets the bitmap.
    pub(crate) fn clear(&mut self) -> Vec<u64> {
        let dirty = self.get_dirty_pages();
        for word in &mut self.bitmap {
            *word = 0;
        }
        self.dirty_count = 0;
        dirty
    }

    pub(crate) fn dirty_count(&self) -> u64 {
        self.dirty_count
    }

    pub(crate) fn total_pages(&self) -> u64 {
        self.num_pages
    }

    pub(crate) fn page_size(&self) -> u64 {
        self.page_size
    }

    /// Marks every tracked page dirty (seeds the first pre-copy round).
    pub(crate) fn mark_all_dirty(&mut self) {
        for (i, word) in self.bitmap.iter_mut().enumerate() {
            let remaining = self.num_pages.saturating_sub(i as u64 * BITS_PER_WORD);
            if remaining >= BITS_PER_WORD {
                *word = u64::MAX;
            } else if remaining > 0 {
                // Set only the bits that correspond to real pages.
                *word = (1u64 << remaining) - 1;
            }
        }
        self.dirty_count = self.num_pages;
    }
}

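/// Phase of a live migration, from idle through iterative pre-copy to the
/// final stop-and-copy pass.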
#[derive(Debug, Clone, Copy, PartialEq, Eq, Default)]
pub enum MigrationPhase {
    /// No migration in progress.
    #[default]
    Idle,
    /// Iteratively copying dirty memory while the guest keeps running.
    PreCopy,
    /// Guest paused; the remaining dirty pages are being copied.
    StopAndCopy,
    /// Migration finished successfully.
    Done,
    /// Migration aborted.
    Failed,
}

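/// Drives pre-copy live migration: repeatedly sends dirty pages while the
/// guest runs, until the dirty set is small enough (or `max_rounds` is hit),
/// then pauses the guest for a final stop-and-copy pass.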
#[cfg(feature = "alloc")]
pub struct MigrationStream {
    /// Current migration phase.
    pub phase: MigrationPhase,
    /// Number of pre-copy rounds completed.
    pub rounds: u32,
    /// Dirty-page count at or below which stop-and-copy may begin.
    pub dirty_page_threshold: u64,
    /// Tracks pages dirtied since the last round.
    pub tracker: DirtyPageTracker,
    /// Pages sent in the most recent round.
    pub pages_sent: u64,
    /// Total pages sent across all rounds.
    pub total_pages_sent: u64,
    /// Total bytes sent across all rounds.
    pub total_bytes_sent: u64,
    /// Upper bound on pre-copy rounds before forcing stop-and-copy.
    pub max_rounds: u32,
}

#[cfg(feature = "alloc")]
impl MigrationStream {
    pub fn new(memory_size: u64) -> Self {
        Self {
            phase: MigrationPhase::Idle,
            rounds: 0,
            dirty_page_threshold: DEFAULT_DIRTY_THRESHOLD,
            tracker: DirtyPageTracker::new(memory_size, PAGE_SIZE),
            pages_sent: 0,
            total_pages_sent: 0,
            total_bytes_sent: 0,
            max_rounds: 32,
        }
    }

    /// Enters pre-copy; the first round transfers all of guest memory.
    pub(crate) fn start_precopy(&mut self) {
        self.phase = MigrationPhase::PreCopy;
        self.rounds = 0;
        self.tracker.mark_all_dirty();
    }

    /// Runs one pre-copy round: takes the current dirty set, updates the
    /// counters, and returns the pages to transfer.
    pub(crate) fn send_dirty_pages(&mut self) -> Vec<u64> {
        if self.phase != MigrationPhase::PreCopy {
            return Vec::new();
        }

        let dirty_pages = self.tracker.clear();
        let count = dirty_pages.len() as u64;
        self.pages_sent = count;
        self.total_pages_sent = self.total_pages_sent.saturating_add(count);
        self.total_bytes_sent = self
            .total_bytes_sent
            .saturating_add(count.saturating_mul(PAGE_SIZE));
        self.rounds += 1;

        dirty_pages
    }

    /// Accounts for pages received on the destination side of the stream.
    pub(crate) fn receive_dirty_pages(&mut self, pages: &[u64]) -> Result<(), VmError> {
        self.total_pages_sent = self.total_pages_sent.saturating_add(pages.len() as u64);
        Ok(())
    }

    /// True once the dirty set is at or below the threshold, or the round
    /// budget is exhausted.
    pub(crate) fn should_stop_and_copy(&self) -> bool {
        if self.phase != MigrationPhase::PreCopy {
            return false;
        }
        self.tracker.dirty_count() <= self.dirty_page_threshold || self.rounds >= self.max_rounds
    }

    /// Final pass: the guest is paused and the remaining dirty pages are sent.
    pub(crate) fn stop_and_copy(&mut self) -> Vec<u64> {
        self.phase = MigrationPhase::StopAndCopy;

        let final_pages = self.tracker.clear();
        let count = final_pages.len() as u64;
        self.total_pages_sent = self.total_pages_sent.saturating_add(count);
        self.total_bytes_sent = self
            .total_bytes_sent
            .saturating_add(count.saturating_mul(PAGE_SIZE));

        final_pages
    }

    pub(crate) fn complete(&mut self) {
        self.phase = MigrationPhase::Done;
    }

    pub(crate) fn fail(&mut self) {
        self.phase = MigrationPhase::Failed;
    }

    /// Snapshot of the progress counters for reporting.
    pub(crate) fn stats(&self) -> MigrationStats {
        MigrationStats {
            phase: self.phase,
            rounds: self.rounds,
            total_pages_sent: self.total_pages_sent,
            total_bytes_sent: self.total_bytes_sent,
            remaining_dirty: self.tracker.dirty_count(),
        }
    }
}

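/// Progress counters reported by `MigrationStream::stats`.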
#[derive(Debug, Clone, Copy)]
pub struct MigrationStats {
    /// Current migration phase.
    pub phase: MigrationPhase,
    /// Pre-copy rounds completed so far.
    pub rounds: u32,
    /// Total pages sent across all rounds.
    pub total_pages_sent: u64,
    /// Total bytes sent across all rounds.
    pub total_bytes_sent: u64,
    /// Pages still marked dirty in the tracker.
    pub remaining_dirty: u64,
}

// The tests use alloc-gated types (`DeviceMultiplexer`, `DeviceState`, ...),
// so they are gated on the `alloc` feature as well.
#[cfg(all(test, feature = "alloc"))]
mod tests {
    use super::*;

    #[test]
    fn test_io_handler_contains() {
        let handler = IoHandler::new(0x3F8, 8, 1);
        assert!(handler.contains_port(0x3F8));
        assert!(handler.contains_port(0x3FF));
        assert!(!handler.contains_port(0x400));
        assert!(!handler.contains_port(0x3F7));
    }

    #[test]
    fn test_io_handler_dispatch() {
        let handler = IoHandler::new(0x3F8, 8, 1);
        assert_eq!(handler.dispatch_io(0x3F8), Some(0));
        assert_eq!(handler.dispatch_io(0x3FA), Some(2));
        assert_eq!(handler.dispatch_io(0x400), None);
    }

    #[test]
    fn test_mmio_handler_contains() {
        let handler = MmioHandler::new(0xFEE0_0000, 0x1000, 2);
        assert!(handler.contains_addr(0xFEE0_0000));
        assert!(handler.contains_addr(0xFEE0_0FFF));
        assert!(!handler.contains_addr(0xFEE0_1000));
    }

    #[test]
    fn test_mmio_handler_dispatch() {
        let handler = MmioHandler::new(0xFEE0_0000, 0x1000, 2);
        assert_eq!(handler.dispatch_mmio(0xFEE0_0010), Some(0x10));
        assert_eq!(handler.dispatch_mmio(0xFEE0_1000), None);
    }

    #[test]
    fn test_device_multiplexer_io() {
        let mut mux = DeviceMultiplexer::new();
        let h1 = IoHandler::new(0x3F8, 8, 1);
        let h2 = IoHandler::new(0x2F8, 8, 2);
        assert!(mux.register_io(h1).is_ok());
        assert!(mux.register_io(h2).is_ok());
        assert_eq!(mux.io_handler_count(), 2);

        assert_eq!(mux.dispatch_io(0x3F8), Some((1, 0)));
        assert_eq!(mux.dispatch_io(0x2FA), Some((2, 2)));
        assert_eq!(mux.dispatch_io(0x100), None);
    }

    #[test]
    fn test_device_multiplexer_io_overlap() {
        let mut mux = DeviceMultiplexer::new();
        let h1 = IoHandler::new(0x3F8, 8, 1);
        // Ports 0x3FC..0x400 overlap 0x3F8..0x400, so the second registration fails.
        let h2 = IoHandler::new(0x3FC, 4, 2);
        assert!(mux.register_io(h1).is_ok());
        assert!(mux.register_io(h2).is_err());
    }

    #[test]
    fn test_device_multiplexer_mmio() {
        let mut mux = DeviceMultiplexer::new();
        let h = MmioHandler::new(0xFEE0_0000, 0x1000, 1);
        assert!(mux.register_mmio(h).is_ok());
        assert_eq!(mux.dispatch_mmio(0xFEE0_0020), Some((1, 0x20)));
    }

    #[test]
    fn test_device_multiplexer_unregister() {
        let mut mux = DeviceMultiplexer::new();
        mux.register_io(IoHandler::new(0x3F8, 8, 1)).unwrap();
        assert!(mux.unregister_io(0x3F8));
        assert_eq!(mux.io_handler_count(), 0);
        // Unregistering the same range again reports failure.
        assert!(!mux.unregister_io(0x3F8));
    }

    #[test]
    fn test_migration_header_roundtrip() {
        let header = MigrationHeader::new(100, 200, 3);
        let bytes = header.to_bytes();
        let parsed = MigrationHeader::from_bytes(&bytes).unwrap();
        assert_eq!(parsed.magic, MIGRATION_MAGIC);
        assert_eq!(parsed.version, MIGRATION_VERSION);
        assert_eq!(parsed.vm_state_size, 100);
        assert_eq!(parsed.device_state_size, 200);
        assert_eq!(parsed.device_count, 3);
    }

    #[test]
    fn test_migration_header_invalid_magic() {
        let mut bytes = MigrationHeader::new(0, 0, 0).to_bytes();
        // Corrupt the first magic byte; parsing must reject the header.
        bytes[0] = 0xFF;
        assert!(MigrationHeader::from_bytes(&bytes).is_err());
    }

    #[test]
    fn test_device_state_write_read() {
        let mut state = DeviceState::new("uart0");
        state.write_u32(42);
        state.write_u64(0xDEAD_BEEF);
        state.write_bytes(&[1, 2, 3, 4]);

        let mut offset = 0;
        assert_eq!(state.read_u32(&mut offset), Some(42));
        assert_eq!(state.read_u64(&mut offset), Some(0xDEAD_BEEF));
        let bytes = state.read_bytes(&mut offset).unwrap();
        assert_eq!(bytes, &[1, 2, 3, 4]);
    }

    #[test]
    fn test_serialize_deserialize_state() {
        let mut vm_state = VmState::new(2, 0x1000_0000);
        vm_state.set_memory_hash(0x1234_5678);
        vm_state.add_vcpu_state(vec![1, 2, 3, 4]);
        vm_state.add_vcpu_state(vec![5, 6, 7, 8]);

        let device_states = vec![
            DeviceState::with_data("uart", vec![0xAA, 0xBB]),
            DeviceState::with_data("pic", vec![0xCC]),
        ];

        let data = serialize_state(&vm_state, &device_states);
        let (parsed_vm, parsed_devs) = deserialize_state(&data).unwrap();

        assert_eq!(parsed_vm.num_vcpus, 2);
        assert_eq!(parsed_vm.memory_size, 0x1000_0000);
        assert_eq!(parsed_vm.memory_hash, 0x1234_5678);
        assert_eq!(parsed_vm.vcpu_regs.len(), 2);
        assert_eq!(parsed_vm.vcpu_regs[0], &[1, 2, 3, 4]);
        assert_eq!(parsed_devs.len(), 2);
        assert_eq!(parsed_devs[0].device_name, "uart");
        assert_eq!(parsed_devs[0].data, &[0xAA, 0xBB]);
    }

    #[test]
    fn test_dirty_page_tracker_basic() {
        let mut tracker = DirtyPageTracker::new(0x10000, PAGE_SIZE);
        assert_eq!(tracker.total_pages(), 16);
        assert_eq!(tracker.dirty_count(), 0);

        tracker.mark_dirty(0);
        tracker.mark_dirty(5);
        assert!(tracker.is_dirty(0));
        assert!(tracker.is_dirty(5));
        assert!(!tracker.is_dirty(1));
        assert_eq!(tracker.dirty_count(), 2);
    }

    #[test]
    fn test_dirty_page_tracker_by_addr() {
        let mut tracker = DirtyPageTracker::new(0x10000, PAGE_SIZE);
        tracker.mark_dirty_addr(0x5000);
        assert!(tracker.is_dirty(5));
    }

    #[test]
    fn test_dirty_page_tracker_get_pages() {
        let mut tracker = DirtyPageTracker::new(0x10000, PAGE_SIZE);
        tracker.mark_dirty(1);
        tracker.mark_dirty(3);
        tracker.mark_dirty(7);
        let pages = tracker.get_dirty_pages();
        assert_eq!(pages, &[1, 3, 7]);
    }

    #[test]
    fn test_dirty_page_tracker_clear() {
        let mut tracker = DirtyPageTracker::new(0x10000, PAGE_SIZE);
        tracker.mark_dirty(0);
        tracker.mark_dirty(1);
        let cleared = tracker.clear();
        assert_eq!(cleared.len(), 2);
        assert_eq!(tracker.dirty_count(), 0);
        assert!(!tracker.is_dirty(0));
    }

    #[test]
    fn test_dirty_page_tracker_mark_all() {
        let mut tracker = DirtyPageTracker::new(0x10000, PAGE_SIZE);
        tracker.mark_all_dirty();
        assert_eq!(tracker.dirty_count(), 16);
        for i in 0..16 {
            assert!(tracker.is_dirty(i));
        }
    }

    #[test]
    fn test_dirty_page_tracker_double_mark() {
        let mut tracker = DirtyPageTracker::new(0x10000, PAGE_SIZE);
        tracker.mark_dirty(5);
        // Marking the same page twice must not double-count it.
        tracker.mark_dirty(5);
        assert_eq!(tracker.dirty_count(), 1);
    }

    #[test]
    fn test_migration_stream_precopy() {
        let mut stream = MigrationStream::new(0x10000);
        assert_eq!(stream.phase, MigrationPhase::Idle);

        stream.start_precopy();
        assert_eq!(stream.phase, MigrationPhase::PreCopy);

        // The first round sends all 16 pages of the 64 KiB guest.
        let pages = stream.send_dirty_pages();
        assert_eq!(pages.len(), 16);
        assert_eq!(stream.rounds, 1);
        assert_eq!(stream.total_pages_sent, 16);
    }

    #[test]
    fn test_migration_stream_stop_and_copy() {
        let mut stream = MigrationStream::new(0x10000);
        stream.start_precopy();
        // Drain the first full round, then dirty two more pages.
        let _ = stream.send_dirty_pages();
        stream.tracker.mark_dirty(1);
        stream.tracker.mark_dirty(3);

        let final_pages = stream.stop_and_copy();
        assert_eq!(stream.phase, MigrationPhase::StopAndCopy);
        assert_eq!(final_pages.len(), 2);

        stream.complete();
        assert_eq!(stream.phase, MigrationPhase::Done);
    }

    #[test]
    fn test_migration_stream_should_stop() {
        let mut stream = MigrationStream::new(0x10000);
        stream.start_precopy();
        let _ = stream.send_dirty_pages();
        // Nothing is dirty after the round, so the threshold is satisfied.
        assert!(stream.should_stop_and_copy());
    }

    #[test]
    fn test_migration_stats() {
        let mut stream = MigrationStream::new(0x10000);
        stream.start_precopy();
        let _ = stream.send_dirty_pages();
        let stats = stream.stats();
        assert_eq!(stats.rounds, 1);
        assert_eq!(stats.total_pages_sent, 16);
        assert_eq!(stats.total_bytes_sent, 16 * PAGE_SIZE);
    }
}