veridian_kernel/virt/hypervisor/migration.rs

#[cfg(feature = "alloc")]
use alloc::{vec, vec::Vec};

use super::{BITS_PER_U64, PAGE_SIZE, PRECOPY_BATCH_SIZE};
use crate::virt::{vmx::VmcsFields, VmError};

/// High-level state machine for a live migration of a guest VM.
#[derive(Debug, Clone, Copy, PartialEq, Eq, Default)]
pub enum MigrationState {
    /// No migration in progress.
    #[default]
    Idle,
    /// Tracking structures are being allocated on the source.
    Setup,
    /// Memory is copied iteratively while the guest keeps running.
    PreCopy,
    /// The guest is paused; remaining dirty pages and vCPU state are copied.
    StopAndCopy,
    /// Final handoff to the destination host.
    Completing,
    /// Migration finished successfully.
    Complete,
    /// Migration was aborted.
    Failed,
}

/// Logical grouping of VMCS fields for serialization.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum VmcsFieldGroup {
    /// General-purpose guest register and control-register state.
    GuestRegisterState,
    /// Guest segment registers and descriptor tables.
    GuestSegmentState,
    /// Guest non-register control state.
    GuestControlState,
    /// Host state loaded on VM exit.
    HostState,
    /// VM-execution control fields.
    ExecutionControls,
    /// VM-exit and VM-entry control fields.
    ExitEntryControls,
    /// Read-only data fields such as the exit reason.
    ReadOnlyData,
}

/// A single VMCS field captured as an (encoding, value) pair.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub struct SerializedVmcsField {
    pub encoding: u32,
    pub value: u64,
}

/// Serialized VMCS state, transferred during stop-and-copy.
#[cfg(feature = "alloc")]
pub struct SerializedVmcs {
    pub fields: Vec<SerializedVmcsField>,
}

#[cfg(feature = "alloc")]
impl Default for SerializedVmcs {
    fn default() -> Self {
        Self::new()
    }
}

#[cfg(feature = "alloc")]
impl SerializedVmcs {
    pub fn new() -> Self {
        Self { fields: Vec::new() }
    }

    pub fn add_field(&mut self, encoding: u32, value: u64) {
        self.fields.push(SerializedVmcsField { encoding, value });
    }

    pub fn field_count(&self) -> usize {
        self.fields.len()
    }

    /// Looks up a field value by VMCS encoding, returning the first match.
    pub fn find_field(&self, encoding: u32) -> Option<u64> {
        self.fields
            .iter()
            .find(|f| f.encoding == encoding)
            .map(|f| f.value)
    }

    /// Canonical ordering of guest register-state fields to capture.
    pub fn serialize_guest_registers() -> &'static [u32] {
        &[
            VmcsFields::GUEST_RIP,
            VmcsFields::GUEST_RSP,
            VmcsFields::GUEST_RFLAGS,
            VmcsFields::GUEST_CR0,
            VmcsFields::GUEST_CR3,
            VmcsFields::GUEST_CR4,
            VmcsFields::GUEST_DR7,
            VmcsFields::GUEST_SYSENTER_CS,
            VmcsFields::GUEST_SYSENTER_ESP,
            VmcsFields::GUEST_SYSENTER_EIP,
            VmcsFields::GUEST_IA32_EFER,
            VmcsFields::GUEST_IA32_PAT,
        ]
    }

    /// Canonical ordering of guest segment-state fields to capture.
    pub fn serialize_guest_segments() -> &'static [u32] {
        &[
            VmcsFields::GUEST_CS_SELECTOR,
            VmcsFields::GUEST_CS_BASE,
            VmcsFields::GUEST_CS_LIMIT,
            VmcsFields::GUEST_CS_ACCESS_RIGHTS,
            VmcsFields::GUEST_SS_SELECTOR,
            VmcsFields::GUEST_SS_BASE,
            VmcsFields::GUEST_SS_LIMIT,
            VmcsFields::GUEST_SS_ACCESS_RIGHTS,
            VmcsFields::GUEST_DS_SELECTOR,
            VmcsFields::GUEST_DS_BASE,
            VmcsFields::GUEST_DS_LIMIT,
            VmcsFields::GUEST_DS_ACCESS_RIGHTS,
            VmcsFields::GUEST_ES_SELECTOR,
            VmcsFields::GUEST_ES_BASE,
            VmcsFields::GUEST_ES_LIMIT,
            VmcsFields::GUEST_ES_ACCESS_RIGHTS,
            VmcsFields::GUEST_FS_SELECTOR,
            VmcsFields::GUEST_FS_BASE,
            VmcsFields::GUEST_FS_LIMIT,
            VmcsFields::GUEST_FS_ACCESS_RIGHTS,
            VmcsFields::GUEST_GS_SELECTOR,
            VmcsFields::GUEST_GS_BASE,
            VmcsFields::GUEST_GS_LIMIT,
            VmcsFields::GUEST_GS_ACCESS_RIGHTS,
            VmcsFields::GUEST_TR_SELECTOR,
            VmcsFields::GUEST_TR_BASE,
            VmcsFields::GUEST_TR_LIMIT,
            VmcsFields::GUEST_TR_ACCESS_RIGHTS,
            VmcsFields::GUEST_LDTR_SELECTOR,
            VmcsFields::GUEST_LDTR_BASE,
            VmcsFields::GUEST_LDTR_LIMIT,
            VmcsFields::GUEST_LDTR_ACCESS_RIGHTS,
            VmcsFields::GUEST_GDTR_BASE,
            VmcsFields::GUEST_GDTR_LIMIT,
            VmcsFields::GUEST_IDTR_BASE,
            VmcsFields::GUEST_IDTR_LIMIT,
        ]
    }
}

/// Bitmap tracking which guest pages have been written since the last
/// pre-copy pass, one bit per page.
#[cfg(feature = "alloc")]
pub struct DirtyPageBitmap {
    /// Dirty bits packed into u64 words.
    bitmap: Vec<u64>,
    /// Total number of guest pages tracked.
    total_pages: u64,
    /// Cached count of bits currently set.
    dirty_count: u64,
}

#[cfg(feature = "alloc")]
impl DirtyPageBitmap {
    pub fn new(total_pages: u64) -> Self {
        let words = total_pages.div_ceil(BITS_PER_U64) as usize;
        Self {
            bitmap: vec![0u64; words],
            total_pages,
            dirty_count: 0,
        }
    }

    /// Marks a page dirty. Out-of-range indices are ignored.
    pub fn set_dirty(&mut self, page_index: u64) {
        if page_index >= self.total_pages {
            return;
        }
        let word = (page_index / BITS_PER_U64) as usize;
        let bit = page_index % BITS_PER_U64;
        if self.bitmap[word] & (1u64 << bit) == 0 {
            self.bitmap[word] |= 1u64 << bit;
            self.dirty_count += 1;
        }
    }

    /// Returns whether a page is currently marked dirty.
    pub fn is_dirty(&self, page_index: u64) -> bool {
        if page_index >= self.total_pages {
            return false;
        }
        let word = (page_index / BITS_PER_U64) as usize;
        let bit = page_index % BITS_PER_U64;
        self.bitmap[word] & (1u64 << bit) != 0
    }

    /// Clears a page's dirty bit. Out-of-range indices are ignored.
    pub fn clear_dirty(&mut self, page_index: u64) {
        if page_index >= self.total_pages {
            return;
        }
        let word = (page_index / BITS_PER_U64) as usize;
        let bit = page_index % BITS_PER_U64;
        if self.bitmap[word] & (1u64 << bit) != 0 {
            self.bitmap[word] &= !(1u64 << bit);
            if self.dirty_count > 0 {
                self.dirty_count -= 1;
            }
        }
    }

    /// Clears every dirty bit and returns how many were set.
    pub fn clear_all(&mut self) -> u64 {
        let count = self.dirty_count;
        for word in &mut self.bitmap {
            *word = 0;
        }
        self.dirty_count = 0;
        count
    }

    /// Returns an iterator over the indices of all dirty pages.
    pub fn dirty_pages(&self) -> DirtyPageIter<'_> {
        DirtyPageIter {
            bitmap: self,
            current_word: 0,
            current_bit: 0,
        }
    }

    pub fn dirty_count(&self) -> u64 {
        self.dirty_count
    }

    pub fn total_pages(&self) -> u64 {
        self.total_pages
    }
}
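
// Worked example of the bit layout (illustrative, assuming BITS_PER_U64 is
// 64 as its name suggests): page_index 130 lands in word 130 / 64 = 2 at
// bit 130 % 64 = 2, so set_dirty(130) sets bit 2 of bitmap[2] and bumps
// dirty_count by one.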

/// Iterator over the set bits of a `DirtyPageBitmap`, yielding page indices.
#[cfg(feature = "alloc")]
pub struct DirtyPageIter<'a> {
    bitmap: &'a DirtyPageBitmap,
    current_word: usize,
    current_bit: u64,
}

#[cfg(feature = "alloc")]
impl<'a> Iterator for DirtyPageIter<'a> {
    type Item = u64;

    fn next(&mut self) -> Option<u64> {
        while self.current_word < self.bitmap.bitmap.len() {
            let word = self.bitmap.bitmap[self.current_word];
            while self.current_bit < BITS_PER_U64 {
                let bit = self.current_bit;
                self.current_bit += 1;
                if word & (1u64 << bit) != 0 {
                    let page_idx = (self.current_word as u64)
                        .checked_mul(BITS_PER_U64)
                        .and_then(|v| v.checked_add(bit));
                    if let Some(idx) = page_idx {
                        if idx < self.bitmap.total_pages {
                            return Some(idx);
                        }
                    }
                }
            }
            self.current_word += 1;
            self.current_bit = 0;
        }
        None
    }
}

/// Running statistics for an in-flight migration.
#[derive(Debug, Clone, Copy, PartialEq, Eq, Default)]
pub struct MigrationProgress {
    /// Total bytes of guest memory to transfer.
    pub total_bytes: u64,
    /// Bytes transferred so far.
    pub transferred_bytes: u64,
    /// Number of completed pre-copy iterations.
    pub iteration: u32,
    /// Dirty pages observed in the current iteration.
    pub current_dirty_pages: u64,
    /// Dirty pages observed in the previous iteration.
    pub previous_dirty_pages: u64,
    /// Measured transfer bandwidth in bytes per millisecond.
    pub bandwidth_bytes_per_ms: u64,
    /// Estimated time to finish at the current bandwidth, in milliseconds.
    pub estimated_remaining_ms: u64,
}

impl MigrationProgress {
    /// Updates the measured bandwidth from the bytes sent over the last
    /// interval and accumulates the transfer total.
    pub fn update_bandwidth(&mut self, bytes_sent: u64, elapsed_ms: u64) {
        if elapsed_ms > 0 {
            self.bandwidth_bytes_per_ms = bytes_sent / elapsed_ms;
        }
        self.transferred_bytes = self.transferred_bytes.saturating_add(bytes_sent);
    }

    /// Re-estimates the remaining transfer time from the current bandwidth.
    pub fn estimate_remaining(&mut self) {
        if self.bandwidth_bytes_per_ms > 0 {
            let remaining = self.total_bytes.saturating_sub(self.transferred_bytes);
            self.estimated_remaining_ms = remaining / self.bandwidth_bytes_per_ms;
        }
    }

    /// Returns completion as a percentage, clamped to 0..=100.
    pub fn completion_percent(&self) -> u32 {
        if self.total_bytes == 0 {
            return 100;
        }
        let percent = self
            .transferred_bytes
            .checked_mul(100)
            .map(|v| v / self.total_bytes)
            .unwrap_or(100);
        if percent > 100 {
            100
        } else {
            percent as u32
        }
    }

    /// Returns true once the dirty-page count has dropped by at least
    /// `threshold_percent` relative to the previous iteration, i.e. the
    /// pre-copy phase is converging.
    pub fn has_converged(&self, threshold_percent: u32) -> bool {
        if self.previous_dirty_pages == 0 {
            return true;
        }
        // saturating_sub guards against a threshold above 100 percent.
        let threshold_pages = self
            .previous_dirty_pages
            .checked_mul(100u32.saturating_sub(threshold_percent) as u64)
            .map(|v| v / 100)
            .unwrap_or(0);
        self.current_dirty_pages <= threshold_pages
    }
}
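
// Worked example of the convergence check (illustrative values): with
// previous_dirty_pages = 1_000 and a 20% threshold, threshold_pages =
// 1_000 * (100 - 20) / 100 = 800, so the iteration counts as converged
// once at most 800 pages were re-dirtied in the latest pass.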

/// Orchestrates a pre-copy live migration on the source host.
#[cfg(feature = "alloc")]
pub struct MigrationController {
    /// Current phase of the migration state machine.
    state: MigrationState,
    /// Running transfer statistics.
    progress: MigrationProgress,
    /// Dirty-page tracking, allocated during setup.
    dirty_bitmap: Option<DirtyPageBitmap>,
    /// Serialized vCPU state, captured during stop-and-copy.
    vmcs_state: Option<SerializedVmcs>,
    /// Identifier of the VM being migrated.
    source_vm_id: u64,
    /// Dirty-rate reduction (percent) required to leave pre-copy.
    convergence_threshold: u32,
    /// Upper bound on pre-copy passes before forcing stop-and-copy.
    max_precopy_iterations: u32,
}

#[cfg(feature = "alloc")]
impl MigrationController {
    pub fn new(source_vm_id: u64) -> Self {
        Self {
            state: MigrationState::Idle,
            progress: MigrationProgress::default(),
            dirty_bitmap: None,
            vmcs_state: None,
            source_vm_id,
            convergence_threshold: 20,
            max_precopy_iterations: 30,
        }
    }

    /// Transitions from `Idle` to `Setup`, allocating the dirty bitmap and
    /// computing the total transfer size.
    pub fn begin_setup(&mut self, total_memory_pages: u64) -> Result<(), VmError> {
        if self.state != MigrationState::Idle {
            return Err(VmError::InvalidVmState);
        }

        self.dirty_bitmap = Some(DirtyPageBitmap::new(total_memory_pages));
        self.progress.total_bytes = total_memory_pages
            .checked_mul(PAGE_SIZE)
            .ok_or(VmError::GuestMemoryError)?;
        self.state = MigrationState::Setup;
        Ok(())
    }

    /// Transitions from `Setup` to `PreCopy`.
    pub fn begin_precopy(&mut self) -> Result<(), VmError> {
        if self.state != MigrationState::Setup {
            return Err(VmError::InvalidVmState);
        }

        // Mark every page dirty so the first pass copies all guest memory.
        if let Some(ref mut bitmap) = self.dirty_bitmap {
            let total = bitmap.total_pages();
            for i in 0..total {
                bitmap.set_dirty(i);
            }
        }

        self.progress.iteration = 0;
        self.state = MigrationState::PreCopy;
        Ok(())
    }

    /// Runs one pre-copy pass: returns the next batch of dirty pages to
    /// transfer and advances to `StopAndCopy` once the dirty rate converges
    /// or the iteration budget is exhausted.
    pub fn precopy_iteration(&mut self) -> Result<Vec<u64>, VmError> {
        if self.state != MigrationState::PreCopy {
            return Err(VmError::InvalidVmState);
        }

        let dirty_pages: Vec<u64> = if let Some(ref bitmap) = self.dirty_bitmap {
            bitmap
                .dirty_pages()
                .take(PRECOPY_BATCH_SIZE as usize)
                .collect()
        } else {
            return Err(VmError::InvalidVmState);
        };

        self.progress.previous_dirty_pages = self.progress.current_dirty_pages;
        self.progress.current_dirty_pages = if let Some(ref bitmap) = self.dirty_bitmap {
            bitmap.dirty_count()
        } else {
            0
        };
        self.progress.iteration += 1;

        // Pages in this batch are about to be sent; clear their dirty bits.
        if let Some(ref mut bitmap) = self.dirty_bitmap {
            for &page_idx in &dirty_pages {
                bitmap.clear_dirty(page_idx);
            }
        }

        // Leave pre-copy once converged or the iteration cap is reached.
        if self.progress.has_converged(self.convergence_threshold)
            || self.progress.iteration >= self.max_precopy_iterations
        {
            self.state = MigrationState::StopAndCopy;
        }

        Ok(dirty_pages)
    }

    /// Pauses iterative copying and enters `StopAndCopy`.
    pub fn begin_stop_and_copy(&mut self) -> Result<(), VmError> {
        if self.state != MigrationState::PreCopy && self.state != MigrationState::StopAndCopy {
            return Err(VmError::InvalidVmState);
        }
        self.state = MigrationState::StopAndCopy;
        Ok(())
    }

    /// Captures vCPU state as (encoding, value) pairs read from the VMCS.
    pub fn serialize_vmcs(&mut self, fields: &[(u32, u64)]) -> Result<(), VmError> {
        let mut vmcs = SerializedVmcs::new();
        for &(encoding, value) in fields {
            vmcs.add_field(encoding, value);
        }
        self.vmcs_state = Some(vmcs);
        Ok(())
    }

    /// Returns the pages that must still be copied while the guest is paused.
    pub fn final_dirty_pages(&self) -> Result<Vec<u64>, VmError> {
        if self.state != MigrationState::StopAndCopy {
            return Err(VmError::InvalidVmState);
        }
        if let Some(ref bitmap) = self.dirty_bitmap {
            Ok(bitmap.dirty_pages().collect())
        } else {
            Err(VmError::InvalidVmState)
        }
    }

    /// Marks the migration as finished.
    pub fn complete(&mut self) -> Result<(), VmError> {
        if self.state != MigrationState::StopAndCopy {
            return Err(VmError::InvalidVmState);
        }
        self.state = MigrationState::Complete;
        Ok(())
    }

    /// Aborts the migration from any state.
    pub fn fail(&mut self) {
        self.state = MigrationState::Failed;
    }

    pub fn state(&self) -> MigrationState {
        self.state
    }

    pub fn progress(&self) -> &MigrationProgress {
        &self.progress
    }

    pub fn source_vm_id(&self) -> u64 {
        self.source_vm_id
    }

    pub fn vmcs_state(&self) -> Option<&SerializedVmcs> {
        self.vmcs_state.as_ref()
    }
}
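
// A minimal lifecycle sketch: it assumes a hosted test build with the
// `alloc` feature enabled, and the VM id and page count below are arbitrary
// illustration values rather than anything prescribed by the module.
#[cfg(all(test, feature = "alloc"))]
mod tests {
    use super::*;

    #[test]
    fn dirty_bitmap_tracks_pages() {
        let mut bitmap = DirtyPageBitmap::new(128);
        bitmap.set_dirty(0);
        bitmap.set_dirty(127);
        bitmap.set_dirty(127); // Re-marking a dirty page does not double-count.
        assert_eq!(bitmap.dirty_count(), 2);
        assert!(bitmap.is_dirty(127));
        let pages: Vec<u64> = bitmap.dirty_pages().collect();
        assert_eq!(pages, vec![0, 127]);
    }

    #[test]
    fn migration_walks_the_state_machine() {
        let mut ctl = MigrationController::new(42);
        assert_eq!(ctl.state(), MigrationState::Idle);

        assert!(ctl.begin_setup(128).is_ok());
        assert!(ctl.begin_precopy().is_ok());

        // Drain batches until convergence or the iteration cap flips the
        // controller into StopAndCopy.
        while ctl.state() == MigrationState::PreCopy {
            assert!(ctl.precopy_iteration().is_ok());
        }

        assert!(ctl.begin_stop_and_copy().is_ok());
        assert!(ctl.serialize_vmcs(&[(VmcsFields::GUEST_RIP, 0x1000)]).is_ok());
        assert!(ctl.final_dirty_pages().is_ok());
        assert!(ctl.complete().is_ok());
        assert_eq!(ctl.state(), MigrationState::Complete);
    }
}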