1#[cfg(feature = "alloc")]
7extern crate alloc;
8
9use super::{VmError, VmExitReason};
10
/// Global VMX bookkeeping, guarded by a spin lock; `None` until `vmx_enable()` succeeds.
static VMX_STATE: spin::Mutex<Option<VmxState>> = spin::Mutex::new(None);
12
/// VMX operation state recorded once `vmx_enable()` has run.
#[derive(Debug)]
pub struct VmxState {
    /// True while the CPU is in VMX root operation (after a successful VMXON).
    pub enabled: bool,
    /// Frame backing the 4 KiB VMXON region; kept alive while VMX is enabled.
    pub vmxon_region: Option<crate::mm::FrameNumber>,
    /// VMCS revision identifier read from IA32_VMX_BASIC bits 30:0.
    pub revision_id: u32,
}
19
/// VMCS field encodings (Intel SDM Vol. 3, Appendix B).
///
/// Encodings are grouped by field width: 16-bit selectors (0x08xx guest,
/// 0x0Cxx host), 64-bit fields (0x2xxx), 32-bit fields (0x4xxx), and
/// natural-width fields (0x6xxx).
pub struct VmcsFields;

#[allow(unused)]
impl VmcsFields {
    // --- 16-bit guest-state selector fields ---
    pub const GUEST_ES_SELECTOR: u32 = 0x0800;
    pub const GUEST_CS_SELECTOR: u32 = 0x0802;
    pub const GUEST_SS_SELECTOR: u32 = 0x0804;
    pub const GUEST_DS_SELECTOR: u32 = 0x0806;
    pub const GUEST_FS_SELECTOR: u32 = 0x0808;
    pub const GUEST_GS_SELECTOR: u32 = 0x080A;
    pub const GUEST_LDTR_SELECTOR: u32 = 0x080C;
    pub const GUEST_TR_SELECTOR: u32 = 0x080E;
    // --- 16-bit host-state selector fields ---
    pub const HOST_ES_SELECTOR: u32 = 0x0C00;
    pub const HOST_CS_SELECTOR: u32 = 0x0C02;
    pub const HOST_SS_SELECTOR: u32 = 0x0C04;
    pub const HOST_DS_SELECTOR: u32 = 0x0C06;
    pub const HOST_FS_SELECTOR: u32 = 0x0C08;
    pub const HOST_GS_SELECTOR: u32 = 0x0C0A;
    pub const HOST_TR_SELECTOR: u32 = 0x0C0C;
    // --- 64-bit control fields ---
    pub const IO_BITMAP_A: u32 = 0x2000;
    pub const IO_BITMAP_B: u32 = 0x2002;
    pub const MSR_BITMAP: u32 = 0x2004;
    pub const EPT_POINTER: u32 = 0x201A;
    // --- 64-bit guest-state fields ---
    pub const GUEST_VMCS_LINK_POINTER: u32 = 0x2800;
    pub const GUEST_IA32_DEBUGCTL: u32 = 0x2802;
    pub const GUEST_IA32_PAT: u32 = 0x2804;
    pub const GUEST_IA32_EFER: u32 = 0x2806;
    // --- 64-bit host-state fields ---
    pub const HOST_IA32_PAT: u32 = 0x2C00;
    pub const HOST_IA32_EFER: u32 = 0x2C02;
    // --- 32-bit control fields (SDM Appendix B.3.1) ---
    pub const PIN_BASED_VM_EXEC_CONTROLS: u32 = 0x4000;
    pub const PRIMARY_PROC_BASED_VM_EXEC_CONTROLS: u32 = 0x4002;
    pub const EXCEPTION_BITMAP: u32 = 0x4004;
    // FIX: the five encodings below were shifted by one slot each
    // (e.g. VM_EXIT_CONTROLS was 0x4010, which is actually the
    // VM-exit MSR-load count). Corrected per SDM Appendix B.3.1.
    pub const VM_EXIT_CONTROLS: u32 = 0x400C;
    pub const VM_EXIT_MSR_STORE_COUNT: u32 = 0x400E;
    pub const VM_EXIT_MSR_LOAD_COUNT: u32 = 0x4010;
    pub const VM_ENTRY_CONTROLS: u32 = 0x4012;
    pub const VM_ENTRY_MSR_LOAD_COUNT: u32 = 0x4014;
    pub const VM_ENTRY_INTERRUPTION_INFO: u32 = 0x4016;
    pub const SECONDARY_PROC_BASED_VM_EXEC_CONTROLS: u32 = 0x401E;
    // --- 32-bit guest-state fields ---
    pub const GUEST_ES_LIMIT: u32 = 0x4800;
    pub const GUEST_CS_LIMIT: u32 = 0x4802;
    pub const GUEST_SS_LIMIT: u32 = 0x4804;
    pub const GUEST_DS_LIMIT: u32 = 0x4806;
    pub const GUEST_FS_LIMIT: u32 = 0x4808;
    pub const GUEST_GS_LIMIT: u32 = 0x480A;
    pub const GUEST_LDTR_LIMIT: u32 = 0x480C;
    pub const GUEST_TR_LIMIT: u32 = 0x480E;
    pub const GUEST_GDTR_LIMIT: u32 = 0x4810;
    pub const GUEST_IDTR_LIMIT: u32 = 0x4812;
    pub const GUEST_ES_ACCESS_RIGHTS: u32 = 0x4814;
    pub const GUEST_CS_ACCESS_RIGHTS: u32 = 0x4816;
    pub const GUEST_SS_ACCESS_RIGHTS: u32 = 0x4818;
    pub const GUEST_DS_ACCESS_RIGHTS: u32 = 0x481A;
    pub const GUEST_FS_ACCESS_RIGHTS: u32 = 0x481C;
    pub const GUEST_GS_ACCESS_RIGHTS: u32 = 0x481E;
    pub const GUEST_LDTR_ACCESS_RIGHTS: u32 = 0x4820;
    pub const GUEST_TR_ACCESS_RIGHTS: u32 = 0x4822;
    pub const GUEST_INTERRUPTIBILITY_STATE: u32 = 0x4824;
    pub const GUEST_ACTIVITY_STATE: u32 = 0x4826;
    pub const GUEST_SYSENTER_CS: u32 = 0x482A;
    // --- 32-bit read-only exit-information fields ---
    pub const VM_EXIT_REASON: u32 = 0x4402;
    pub const VM_EXIT_INTERRUPTION_INFO: u32 = 0x4404;
    pub const VM_EXIT_INTERRUPTION_ERROR_CODE: u32 = 0x4406;
    pub const VM_EXIT_INSTRUCTION_LENGTH: u32 = 0x440C;
    pub const VM_EXIT_INSTRUCTION_INFO: u32 = 0x440E;
    // --- Natural-width guest-state fields ---
    pub const GUEST_CR0: u32 = 0x6800;
    pub const GUEST_CR3: u32 = 0x6802;
    pub const GUEST_CR4: u32 = 0x6804;
    pub const GUEST_ES_BASE: u32 = 0x6806;
    pub const GUEST_CS_BASE: u32 = 0x6808;
    pub const GUEST_SS_BASE: u32 = 0x680A;
    pub const GUEST_DS_BASE: u32 = 0x680C;
    pub const GUEST_FS_BASE: u32 = 0x680E;
    pub const GUEST_GS_BASE: u32 = 0x6810;
    pub const GUEST_LDTR_BASE: u32 = 0x6812;
    pub const GUEST_TR_BASE: u32 = 0x6814;
    pub const GUEST_GDTR_BASE: u32 = 0x6816;
    pub const GUEST_IDTR_BASE: u32 = 0x6818;
    pub const GUEST_DR7: u32 = 0x681A;
    pub const GUEST_RSP: u32 = 0x681C;
    pub const GUEST_RIP: u32 = 0x681E;
    pub const GUEST_RFLAGS: u32 = 0x6820;
    pub const GUEST_SYSENTER_ESP: u32 = 0x6824;
    pub const GUEST_SYSENTER_EIP: u32 = 0x6826;
    // --- Natural-width host-state fields ---
    pub const HOST_CR0: u32 = 0x6C00;
    pub const HOST_CR3: u32 = 0x6C02;
    pub const HOST_CR4: u32 = 0x6C04;
    pub const HOST_FS_BASE: u32 = 0x6C06;
    pub const HOST_GS_BASE: u32 = 0x6C08;
    pub const HOST_TR_BASE: u32 = 0x6C0A;
    pub const HOST_GDTR_BASE: u32 = 0x6C0C;
    pub const HOST_IDTR_BASE: u32 = 0x6C0E;
    pub const HOST_IA32_SYSENTER_ESP: u32 = 0x6C10;
    pub const HOST_IA32_SYSENTER_EIP: u32 = 0x6C12;
    pub const HOST_RSP: u32 = 0x6C14;
    pub const HOST_RIP: u32 = 0x6C16;
    // --- Natural-width read-only exit-information fields ---
    pub const EXIT_QUALIFICATION: u32 = 0x6400;
    pub const GUEST_LINEAR_ADDRESS: u32 = 0x640A;
    pub const GUEST_PHYSICAL_ADDRESS: u32 = 0x2400;
}
121
// VMX capability/control MSR numbers (Intel SDM Vol. 4).
// IA32_VMX_BASIC[30:0] holds the VMCS revision identifier.
#[cfg(target_arch = "x86_64")]
const IA32_VMX_BASIC: u32 = 0x480;
// CR0 fixed bits: FIXED0 bits must be 1, FIXED1-clear bits must be 0 in VMX operation.
#[cfg(target_arch = "x86_64")]
const IA32_VMX_CR0_FIXED0: u32 = 0x486;
#[cfg(target_arch = "x86_64")]
const IA32_VMX_CR0_FIXED1: u32 = 0x487;
// Lock (bit 0) and VMX-outside-SMX enable (bit 2); see vmx_enable().
#[cfg(target_arch = "x86_64")]
const IA32_FEATURE_CONTROL: u32 = 0x3A;
// Allowed-0/allowed-1 settings for the corresponding VMCS control fields.
#[cfg(target_arch = "x86_64")]
const IA32_VMX_PINBASED_CTLS: u32 = 0x481;
#[cfg(target_arch = "x86_64")]
const IA32_VMX_PROCBASED_CTLS: u32 = 0x482;
#[cfg(target_arch = "x86_64")]
const IA32_VMX_EXIT_CTLS: u32 = 0x483;
#[cfg(target_arch = "x86_64")]
const IA32_VMX_ENTRY_CTLS: u32 = 0x484;
#[cfg(target_arch = "x86_64")]
const IA32_VMX_PROCBASED_CTLS2: u32 = 0x48B;
// CR4.VMXE (bit 13): must be set before VMXON may execute.
#[cfg(target_arch = "x86_64")]
const CR4_VMXE: u64 = 1 << 13;
142
/// An allocated 4 KiB VMCS region and whether it is currently the
/// active (VMPTRLD'ed) VMCS on this CPU.
pub struct Vmcs {
    /// Physical frame backing the VMCS region.
    frame: crate::mm::FrameNumber,
    /// Set by `load()`, cleared by `clear()`; gates `read_field`/`write_field`.
    active: bool,
}
147
impl Vmcs {
    /// Allocates one zeroed frame for a VMCS region and stamps the VMCS
    /// revision identifier (IA32_VMX_BASIC bits 30:0) into its first
    /// 4 bytes, as required before VMCLEAR/VMPTRLD may use the region.
    #[cfg(target_arch = "x86_64")]
    pub fn allocate() -> Result<Self, VmError> {
        use crate::mm::frame_allocator::FRAME_ALLOCATOR;
        // Scope the allocator lock to just the allocation call.
        let frame = {
            let allocator = FRAME_ALLOCATOR.lock();
            allocator
                .allocate_frames(1, None)
                .map_err(|_| VmError::VmcsAllocationFailed)?
        };
        let phys_addr = frame.as_u64() * crate::mm::FRAME_SIZE as u64;
        let virt_addr = crate::mm::phys_to_virt_addr(phys_addr);
        unsafe {
            // Zero the whole frame, then write the revision-id dword at offset 0.
            core::ptr::write_bytes(virt_addr as *mut u8, 0, crate::mm::FRAME_SIZE);
            let vmx_basic = super::read_msr(IA32_VMX_BASIC);
            let revision_id = (vmx_basic & 0x7FFF_FFFF) as u32;
            core::ptr::write_volatile(virt_addr as *mut u32, revision_id);
        }
        Ok(Self {
            frame,
            active: false,
        })
    }

    /// Non-x86_64 stub: VMX is an Intel x86 feature only.
    #[cfg(not(target_arch = "x86_64"))]
    pub fn allocate() -> Result<Self, VmError> {
        Err(VmError::VmxNotSupported)
    }

    /// Physical address of the VMCS region (frame number × frame size).
    pub fn physical_address(&self) -> u64 {
        self.frame.as_u64() * crate::mm::FRAME_SIZE as u64
    }

    /// VMCLEAR: flushes any cached state for this VMCS and marks it inactive.
    #[cfg(target_arch = "x86_64")]
    pub fn clear(&mut self) -> Result<(), VmError> {
        let phys_addr = self.physical_address();
        unsafe {
            // VMCLEAR takes the physical address as an m64 operand, hence
            // the pointer to a stack copy. setna yields 1 when CF or ZF
            // is set (VMfailInvalid / VMfailValid).
            let success: u8;
            core::arch::asm!(
                "vmclear [{addr}]", "setna {success}",
                addr = in(reg) &phys_addr as *const u64,
                success = out(reg_byte) success, options(nostack),
            );
            if success != 0 {
                return Err(VmError::VmxOperationFailed);
            }
        }
        self.active = false;
        Ok(())
    }

    /// Non-x86_64 stub.
    #[cfg(not(target_arch = "x86_64"))]
    pub fn clear(&mut self) -> Result<(), VmError> {
        Err(VmError::VmxNotSupported)
    }

    /// VMPTRLD: makes this VMCS the current VMCS on this CPU, enabling
    /// subsequent `read_field`/`write_field` calls.
    #[cfg(target_arch = "x86_64")]
    pub fn load(&mut self) -> Result<(), VmError> {
        let phys_addr = self.physical_address();
        unsafe {
            // Same m64-operand and setna convention as clear().
            let success: u8;
            core::arch::asm!(
                "vmptrld [{addr}]", "setna {success}",
                addr = in(reg) &phys_addr as *const u64,
                success = out(reg_byte) success, options(nostack),
            );
            if success != 0 {
                return Err(VmError::VmxOperationFailed);
            }
        }
        self.active = true;
        Ok(())
    }

    /// Non-x86_64 stub.
    #[cfg(not(target_arch = "x86_64"))]
    pub fn load(&mut self) -> Result<(), VmError> {
        Err(VmError::VmxNotSupported)
    }

    /// VMWRITE `value` into the VMCS field with encoding `field`
    /// (see `VmcsFields`). Fails unless this VMCS is the current one.
    #[cfg(target_arch = "x86_64")]
    pub fn write_field(&self, field: u32, value: u64) -> Result<(), VmError> {
        if !self.active {
            return Err(VmError::VmcsFieldError);
        }
        unsafe {
            // VMWRITE: first operand selects the field, second supplies the value.
            let success: u8;
            core::arch::asm!(
                "vmwrite {field}, {value}", "setna {success}",
                field = in(reg) field as u64, value = in(reg) value,
                success = out(reg_byte) success, options(nostack, nomem),
            );
            if success != 0 {
                return Err(VmError::VmcsFieldError);
            }
        }
        Ok(())
    }

    /// Non-x86_64 stub.
    #[cfg(not(target_arch = "x86_64"))]
    pub fn write_field(&self, _field: u32, _value: u64) -> Result<(), VmError> {
        Err(VmError::VmxNotSupported)
    }

    /// VMREAD the VMCS field with encoding `field`.
    /// Fails unless this VMCS is the current one.
    #[cfg(target_arch = "x86_64")]
    pub fn read_field(&self, field: u32) -> Result<u64, VmError> {
        if !self.active {
            return Err(VmError::VmcsFieldError);
        }
        let value: u64;
        unsafe {
            // VMREAD: destination register first, field encoding second.
            let success: u8;
            core::arch::asm!(
                "vmread {value}, {field}", "setna {success}",
                field = in(reg) field as u64, value = out(reg) value,
                success = out(reg_byte) success, options(nostack, nomem),
            );
            if success != 0 {
                return Err(VmError::VmcsFieldError);
            }
        }
        Ok(value)
    }

    /// Non-x86_64 stub.
    #[cfg(not(target_arch = "x86_64"))]
    pub fn read_field(&self, _field: u32) -> Result<u64, VmError> {
        Err(VmError::VmxNotSupported)
    }

    /// Whether this VMCS is currently loaded (VMPTRLD'ed).
    pub fn is_active(&self) -> bool {
        self.active
    }
    /// The frame backing this VMCS region.
    pub fn frame(&self) -> crate::mm::FrameNumber {
        self.frame
    }
}
288
289#[cfg(target_arch = "x86_64")]
292pub fn vmx_enable() -> Result<(), VmError> {
293 use crate::mm::frame_allocator::FRAME_ALLOCATOR;
294 {
295 let state = VMX_STATE.lock();
296 if let Some(ref s) = *state {
297 if s.enabled {
298 return Err(VmError::VmxAlreadyEnabled);
299 }
300 }
301 }
302 if !super::cpu_supports_vmx() {
303 return Err(VmError::VmxNotSupported);
304 }
305
306 let feature_control = unsafe { super::read_msr(IA32_FEATURE_CONTROL) };
307 let lock_bit = feature_control & 1;
308 let vmx_outside_smx = (feature_control >> 2) & 1;
309 if lock_bit != 0 && vmx_outside_smx == 0 {
310 return Err(VmError::VmxNotSupported);
311 }
312 if lock_bit == 0 {
313 unsafe { super::write_msr(IA32_FEATURE_CONTROL, feature_control | (1 << 2) | 1) };
314 }
315
316 let vmx_basic = unsafe { super::read_msr(IA32_VMX_BASIC) };
317 let revision_id = (vmx_basic & 0x7FFF_FFFF) as u32;
318
319 let vmxon_frame = {
320 let allocator = FRAME_ALLOCATOR.lock();
321 allocator
322 .allocate_frames(1, None)
323 .map_err(|_| VmError::VmcsAllocationFailed)?
324 };
325 let vmxon_phys = vmxon_frame.as_u64() * crate::mm::FRAME_SIZE as u64;
326 let vmxon_virt = crate::mm::phys_to_virt_addr(vmxon_phys);
327 unsafe {
329 core::ptr::write_bytes(vmxon_virt as *mut u8, 0, crate::mm::FRAME_SIZE);
330 core::ptr::write_volatile(vmxon_virt as *mut u32, revision_id);
331 }
332
333 unsafe {
335 let cr4: u64;
336 core::arch::asm!("mov {}, cr4", out(reg) cr4, options(nostack, nomem));
337 core::arch::asm!("mov cr4, {}", in(reg) cr4 | CR4_VMXE, options(nostack, nomem));
338 }
339
340 unsafe {
342 let fixed0 = super::read_msr(IA32_VMX_CR0_FIXED0);
343 let fixed1 = super::read_msr(IA32_VMX_CR0_FIXED1);
344 let cr0: u64;
345 core::arch::asm!("mov {}, cr0", out(reg) cr0, options(nostack, nomem));
346 core::arch::asm!("mov cr0, {}", in(reg) (cr0 | fixed0) & fixed1, options(nostack, nomem));
347 }
348
349 unsafe {
351 let success: u8;
352 core::arch::asm!(
353 "vmxon [{addr}]", "setna {success}",
354 addr = in(reg) &vmxon_phys as *const u64,
355 success = out(reg_byte) success, options(nostack),
356 );
357 if success != 0 {
358 let cr4: u64;
359 core::arch::asm!("mov {}, cr4", out(reg) cr4, options(nostack, nomem));
360 core::arch::asm!("mov cr4, {}", in(reg) cr4 & !CR4_VMXE, options(nostack, nomem));
361 return Err(VmError::VmxOperationFailed);
362 }
363 }
364
365 let mut state = VMX_STATE.lock();
366 *state = Some(VmxState {
367 enabled: true,
368 vmxon_region: Some(vmxon_frame),
369 revision_id,
370 });
371 crate::println!(" [vmx] VMX enabled (revision 0x{:08x})", revision_id);
372 Ok(())
373}
374
/// Non-x86_64 stub: VMX is an Intel x86 feature only.
#[cfg(not(target_arch = "x86_64"))]
pub fn vmx_enable() -> Result<(), VmError> {
    Err(VmError::VmxNotSupported)
}
379
/// Leaves VMX root operation: VMXOFF, then clear CR4.VMXE.
///
/// Returns `Ok(())` without doing anything when VMX was never enabled.
/// NOTE(review): the VMXON region frame kept in `VMX_STATE` is not
/// returned to the frame allocator here — confirm whether that is intended.
#[cfg(target_arch = "x86_64")]
pub fn vmx_disable() -> Result<(), VmError> {
    let mut state = VMX_STATE.lock();
    match state.as_ref() {
        Some(s) if s.enabled => {}
        _ => return Ok(()),
    }
    unsafe {
        // VMXOFF must execute before CR4.VMXE is cleared.
        core::arch::asm!("vmxoff", options(nostack, nomem));
        let cr4: u64;
        core::arch::asm!("mov {}, cr4", out(reg) cr4, options(nostack, nomem));
        core::arch::asm!("mov cr4, {}", in(reg) cr4 & !CR4_VMXE, options(nostack, nomem));
    }
    if let Some(ref mut s) = *state {
        s.enabled = false;
    }
    crate::println!(" [vmx] VMX disabled");
    Ok(())
}
400
/// Non-x86_64 stub.
#[cfg(not(target_arch = "x86_64"))]
pub fn vmx_disable() -> Result<(), VmError> {
    Err(VmError::VmxNotSupported)
}
405
406pub fn is_vmx_enabled() -> bool {
407 let state = VMX_STATE.lock();
408 state.as_ref().is_some_and(|s| s.enabled)
409}
410
411pub fn vmcs_revision_id() -> Option<u32> {
412 let state = VMX_STATE.lock();
413 state.as_ref().map(|s| s.revision_id)
414}
415
416#[cfg(target_arch = "x86_64")]
417fn adjust_controls(msr: u32, desired: u32) -> u32 {
418 let msr_val = unsafe { super::read_msr(msr) };
419 let required = msr_val as u32;
420 let allowed = (msr_val >> 32) as u32;
421 (desired | required) & allowed
422}
423
424#[cfg(target_arch = "x86_64")]
425pub fn setup_vmcs(vmcs: &Vmcs, guest_entry: u64, guest_stack: u64) -> Result<(), VmError> {
426 if !vmcs.is_active() {
427 return Err(VmError::VmcsFieldError);
428 }
429
430 let host_cr0: u64;
431 let host_cr3: u64;
432 let host_cr4: u64;
433 unsafe {
435 core::arch::asm!("mov {}, cr0", out(reg) host_cr0, options(nostack, nomem));
436 core::arch::asm!("mov {}, cr3", out(reg) host_cr3, options(nostack, nomem));
437 core::arch::asm!("mov {}, cr4", out(reg) host_cr4, options(nostack, nomem));
438 }
439 vmcs.write_field(VmcsFields::HOST_CR0, host_cr0)?;
440 vmcs.write_field(VmcsFields::HOST_CR3, host_cr3)?;
441 vmcs.write_field(VmcsFields::HOST_CR4, host_cr4)?;
442
443 let (cs, ss, ds, es, fs, gs, tr): (u16, u16, u16, u16, u16, u16, u16);
444 unsafe {
446 core::arch::asm!("mov {:x}, cs", out(reg) cs, options(nostack, nomem));
447 core::arch::asm!("mov {:x}, ss", out(reg) ss, options(nostack, nomem));
448 core::arch::asm!("mov {:x}, ds", out(reg) ds, options(nostack, nomem));
449 core::arch::asm!("mov {:x}, es", out(reg) es, options(nostack, nomem));
450 core::arch::asm!("mov {:x}, fs", out(reg) fs, options(nostack, nomem));
451 core::arch::asm!("mov {:x}, gs", out(reg) gs, options(nostack, nomem));
452 core::arch::asm!("str {:x}", out(reg) tr, options(nostack, nomem));
453 }
454 vmcs.write_field(VmcsFields::HOST_CS_SELECTOR, cs as u64)?;
455 vmcs.write_field(VmcsFields::HOST_SS_SELECTOR, ss as u64)?;
456 vmcs.write_field(VmcsFields::HOST_DS_SELECTOR, ds as u64)?;
457 vmcs.write_field(VmcsFields::HOST_ES_SELECTOR, es as u64)?;
458 vmcs.write_field(VmcsFields::HOST_FS_SELECTOR, fs as u64)?;
459 vmcs.write_field(VmcsFields::HOST_GS_SELECTOR, gs as u64)?;
460 vmcs.write_field(VmcsFields::HOST_TR_SELECTOR, tr as u64)?;
461
462 let gdtr: [u8; 10] = [0; 10];
463 let idtr: [u8; 10] = [0; 10];
464 unsafe {
466 core::arch::asm!("sgdt [{}]", in(reg) &gdtr as *const _, options(nostack));
467 core::arch::asm!("sidt [{}]", in(reg) &idtr as *const _, options(nostack));
468 }
469 let gdt_base = u64::from_le_bytes(gdtr[2..10].try_into().unwrap_or([0; 8]));
470 let idt_base = u64::from_le_bytes(idtr[2..10].try_into().unwrap_or([0; 8]));
471 vmcs.write_field(VmcsFields::HOST_GDTR_BASE, gdt_base)?;
472 vmcs.write_field(VmcsFields::HOST_IDTR_BASE, idt_base)?;
473 vmcs.write_field(VmcsFields::HOST_RIP, vm_exit_handler as *const () as u64)?;
474 vmcs.write_field(VmcsFields::HOST_FS_BASE, 0)?;
475 vmcs.write_field(VmcsFields::HOST_GS_BASE, 0)?;
476 vmcs.write_field(VmcsFields::HOST_TR_BASE, 0)?;
477 vmcs.write_field(VmcsFields::HOST_IA32_SYSENTER_ESP, 0)?;
478 vmcs.write_field(VmcsFields::HOST_IA32_SYSENTER_EIP, 0)?;
479
480 vmcs.write_field(VmcsFields::GUEST_CR0, host_cr0)?;
482 vmcs.write_field(VmcsFields::GUEST_CR3, 0)?;
483 vmcs.write_field(VmcsFields::GUEST_CR4, host_cr4 & !CR4_VMXE)?;
484
485 let cs_ar: u64 = 0xA09B;
486 let ds_ar: u64 = 0xC093;
487 let tr_ar: u64 = 0x008B;
488 let ldtr_ar: u64 = 0x10000;
489
490 vmcs.write_field(VmcsFields::GUEST_CS_SELECTOR, 0x08)?;
491 vmcs.write_field(VmcsFields::GUEST_CS_BASE, 0)?;
492 vmcs.write_field(VmcsFields::GUEST_CS_LIMIT, 0xFFFF_FFFF)?;
493 vmcs.write_field(VmcsFields::GUEST_CS_ACCESS_RIGHTS, cs_ar)?;
494
495 for (sel, base, limit, ar) in [
496 (
497 VmcsFields::GUEST_SS_SELECTOR,
498 VmcsFields::GUEST_SS_BASE,
499 VmcsFields::GUEST_SS_LIMIT,
500 VmcsFields::GUEST_SS_ACCESS_RIGHTS,
501 ),
502 (
503 VmcsFields::GUEST_DS_SELECTOR,
504 VmcsFields::GUEST_DS_BASE,
505 VmcsFields::GUEST_DS_LIMIT,
506 VmcsFields::GUEST_DS_ACCESS_RIGHTS,
507 ),
508 (
509 VmcsFields::GUEST_ES_SELECTOR,
510 VmcsFields::GUEST_ES_BASE,
511 VmcsFields::GUEST_ES_LIMIT,
512 VmcsFields::GUEST_ES_ACCESS_RIGHTS,
513 ),
514 (
515 VmcsFields::GUEST_FS_SELECTOR,
516 VmcsFields::GUEST_FS_BASE,
517 VmcsFields::GUEST_FS_LIMIT,
518 VmcsFields::GUEST_FS_ACCESS_RIGHTS,
519 ),
520 (
521 VmcsFields::GUEST_GS_SELECTOR,
522 VmcsFields::GUEST_GS_BASE,
523 VmcsFields::GUEST_GS_LIMIT,
524 VmcsFields::GUEST_GS_ACCESS_RIGHTS,
525 ),
526 ] {
527 vmcs.write_field(sel, 0x10)?;
528 vmcs.write_field(base, 0)?;
529 vmcs.write_field(limit, 0xFFFF_FFFF)?;
530 vmcs.write_field(ar, ds_ar)?;
531 }
532
533 vmcs.write_field(VmcsFields::GUEST_TR_SELECTOR, 0x18)?;
534 vmcs.write_field(VmcsFields::GUEST_TR_BASE, 0)?;
535 vmcs.write_field(VmcsFields::GUEST_TR_LIMIT, 0x67)?;
536 vmcs.write_field(VmcsFields::GUEST_TR_ACCESS_RIGHTS, tr_ar)?;
537 vmcs.write_field(VmcsFields::GUEST_LDTR_SELECTOR, 0)?;
538 vmcs.write_field(VmcsFields::GUEST_LDTR_BASE, 0)?;
539 vmcs.write_field(VmcsFields::GUEST_LDTR_LIMIT, 0)?;
540 vmcs.write_field(VmcsFields::GUEST_LDTR_ACCESS_RIGHTS, ldtr_ar)?;
541 vmcs.write_field(VmcsFields::GUEST_GDTR_BASE, 0)?;
542 vmcs.write_field(VmcsFields::GUEST_GDTR_LIMIT, 0)?;
543 vmcs.write_field(VmcsFields::GUEST_IDTR_BASE, 0)?;
544 vmcs.write_field(VmcsFields::GUEST_IDTR_LIMIT, 0)?;
545 vmcs.write_field(VmcsFields::GUEST_DR7, 0x400)?;
546 vmcs.write_field(VmcsFields::GUEST_RFLAGS, 0x2)?;
547 vmcs.write_field(VmcsFields::GUEST_RIP, guest_entry)?;
548 vmcs.write_field(VmcsFields::GUEST_RSP, guest_stack)?;
549 vmcs.write_field(VmcsFields::GUEST_INTERRUPTIBILITY_STATE, 0)?;
550 vmcs.write_field(VmcsFields::GUEST_ACTIVITY_STATE, 0)?;
551 vmcs.write_field(VmcsFields::GUEST_VMCS_LINK_POINTER, 0xFFFF_FFFF_FFFF_FFFF)?;
552 vmcs.write_field(VmcsFields::GUEST_SYSENTER_CS, 0)?;
553 vmcs.write_field(VmcsFields::GUEST_SYSENTER_ESP, 0)?;
554 vmcs.write_field(VmcsFields::GUEST_SYSENTER_EIP, 0)?;
555
556 let pin_based = adjust_controls(IA32_VMX_PINBASED_CTLS, 0x0000_0001);
558 vmcs.write_field(VmcsFields::PIN_BASED_VM_EXEC_CONTROLS, pin_based as u64)?;
559 let primary_proc = adjust_controls(
560 IA32_VMX_PROCBASED_CTLS,
561 (1 << 7) | (1 << 24) | (1 << 28) | (1 << 31),
562 );
563 vmcs.write_field(
564 VmcsFields::PRIMARY_PROC_BASED_VM_EXEC_CONTROLS,
565 primary_proc as u64,
566 )?;
567 let secondary_proc = adjust_controls(IA32_VMX_PROCBASED_CTLS2, (1 << 1) | (1 << 7));
568 vmcs.write_field(
569 VmcsFields::SECONDARY_PROC_BASED_VM_EXEC_CONTROLS,
570 secondary_proc as u64,
571 )?;
572 vmcs.write_field(VmcsFields::EXCEPTION_BITMAP, 0)?;
573
574 let exit_controls = adjust_controls(IA32_VMX_EXIT_CTLS, 1 << 9);
575 vmcs.write_field(VmcsFields::VM_EXIT_CONTROLS, exit_controls as u64)?;
576 vmcs.write_field(VmcsFields::VM_EXIT_MSR_STORE_COUNT, 0)?;
577 vmcs.write_field(VmcsFields::VM_EXIT_MSR_LOAD_COUNT, 0)?;
578
579 let entry_controls = adjust_controls(IA32_VMX_ENTRY_CTLS, 1 << 9);
580 vmcs.write_field(VmcsFields::VM_ENTRY_CONTROLS, entry_controls as u64)?;
581 vmcs.write_field(VmcsFields::VM_ENTRY_MSR_LOAD_COUNT, 0)?;
582 vmcs.write_field(VmcsFields::VM_ENTRY_INTERRUPTION_INFO, 0)?;
583
584 Ok(())
585}
586
/// Non-x86_64 stub.
#[cfg(not(target_arch = "x86_64"))]
pub fn setup_vmcs(_vmcs: &Vmcs, _guest_entry: u64, _guest_stack: u64) -> Result<(), VmError> {
    Err(VmError::VmxNotSupported)
}
591
/// First entry into the guest via VMLAUNCH.
///
/// On a successful entry VMLAUNCH does not return here: control resumes
/// in the guest, and the next VM exit lands at HOST_RIP
/// (`vm_exit_handler`). Falling through the asm therefore always means
/// entry failed, which is why both paths below return `VmEntryFailed`.
#[cfg(target_arch = "x86_64")]
pub fn vm_launch() -> Result<VmExitReason, VmError> {
    let success: u8;
    unsafe {
        // setna captures CF (VMfailInvalid) or ZF (VMfailValid).
        core::arch::asm!("vmlaunch", "setna {success}", success = out(reg_byte) success, options(nostack));
    }
    if success != 0 {
        return Err(VmError::VmEntryFailed);
    }
    Err(VmError::VmEntryFailed)
}
604
/// Non-x86_64 stub.
#[cfg(not(target_arch = "x86_64"))]
pub fn vm_launch() -> Result<VmExitReason, VmError> {
    Err(VmError::VmxNotSupported)
}
609
/// Re-enters an already-launched guest via VMRESUME.
///
/// Mirrors `vm_launch`: a successful entry never returns through this
/// path (the next VM exit goes to HOST_RIP), so reaching the code after
/// the asm always signals an entry failure.
#[cfg(target_arch = "x86_64")]
pub fn vm_resume() -> Result<VmExitReason, VmError> {
    let success: u8;
    unsafe {
        // setna captures CF (VMfailInvalid) or ZF (VMfailValid).
        core::arch::asm!("vmresume", "setna {success}", success = out(reg_byte) success, options(nostack));
    }
    if success != 0 {
        return Err(VmError::VmEntryFailed);
    }
    Err(VmError::VmEntryFailed)
}
622
/// Non-x86_64 stub.
#[cfg(not(target_arch = "x86_64"))]
pub fn vm_resume() -> Result<VmExitReason, VmError> {
    Err(VmError::VmxNotSupported)
}
627
/// VM-exit entry point installed as HOST_RIP by `setup_vmcs`.
///
/// Reads the raw exit-reason field from the current VMCS and forwards
/// the decoded reason to `handle_vm_exit`, discarding its result.
/// NOTE(review): this function simply returns after handling, and
/// assumes a valid host stack at exit time — confirm that HOST_RSP and
/// a surrounding entry/exit shim make that sound.
#[cfg(target_arch = "x86_64")]
extern "C" fn vm_exit_handler() {
    let raw_reason: u64;
    unsafe {
        // VMREAD of VM_EXIT_REASON; the setna result is ignored here.
        let _success: u8;
        core::arch::asm!(
            "vmread {value}, {field}", "setna {success}",
            field = in(reg) VmcsFields::VM_EXIT_REASON as u64,
            value = out(reg) raw_reason,
            success = out(reg_byte) _success,
            options(nostack, nomem),
        );
    }
    let reason = VmExitReason::from_raw(raw_reason as u32);
    let _ = handle_vm_exit(reason);
}
645
646pub fn handle_vm_exit(reason: VmExitReason) -> Result<(), VmError> {
647 match reason {
648 VmExitReason::Cpuid => {
649 crate::println!(" [vmx] VM exit: CPUID");
650 Ok(())
651 }
652 VmExitReason::Hlt => {
653 crate::println!(" [vmx] VM exit: HLT");
654 Ok(())
655 }
656 VmExitReason::IoInstruction => {
657 crate::println!(" [vmx] VM exit: I/O");
658 Ok(())
659 }
660 VmExitReason::Rdmsr | VmExitReason::Wrmsr => {
661 crate::println!(" [vmx] VM exit: MSR");
662 Ok(())
663 }
664 VmExitReason::EptViolation => {
665 crate::println!(" [vmx] VM exit: EPT violation");
666 Ok(())
667 }
668 VmExitReason::ExternalInterrupt => Ok(()),
669 VmExitReason::TripleFault => {
670 crate::println!(" [vmx] VM exit: Triple fault");
671 Err(VmError::VmExitHandlerError)
672 }
673 VmExitReason::EntryFailInvalidGuestState => {
674 crate::println!(" [vmx] VM exit: Invalid guest state");
675 Err(VmError::InvalidGuestState)
676 }
677 VmExitReason::Vmcall => {
678 crate::println!(" [vmx] VM exit: VMCALL");
679 Ok(())
680 }
681 _ => {
682 crate::println!(" [vmx] VM exit: unhandled {:?}", reason);
683 Err(VmError::VmExitHandlerError)
684 }
685 }
686}
687
688pub fn vmx_status() -> (bool, bool, Option<u32>) {
689 (
690 super::cpu_supports_vmx(),
691 is_vmx_enabled(),
692 vmcs_revision_id(),
693 )
694}
695
#[cfg(test)]
mod tests {
    use super::*;

    // Spot-check a handful of VMCS encodings against the SDM tables.
    #[test]
    fn test_vmcs_field_constants() {
        assert_eq!(VmcsFields::GUEST_RIP, 0x681E);
        assert_eq!(VmcsFields::GUEST_RSP, 0x681C);
        assert_eq!(VmcsFields::HOST_RIP, 0x6C16);
        assert_eq!(VmcsFields::HOST_RSP, 0x6C14);
        assert_eq!(VmcsFields::VM_EXIT_REASON, 0x4402);
        assert_eq!(VmcsFields::EPT_POINTER, 0x201A);
    }

    // Before vmx_enable() runs, the global state reports disabled.
    #[test]
    fn test_vmx_state_initial() {
        assert!(!is_vmx_enabled());
        assert!(vmcs_revision_id().is_none());
    }

    // Benign exit reasons succeed; fatal ones fail.
    #[test]
    fn test_handle_vm_exit_cpuid() {
        assert!(handle_vm_exit(VmExitReason::Cpuid).is_ok());
    }

    #[test]
    fn test_handle_vm_exit_triple_fault() {
        assert!(handle_vm_exit(VmExitReason::TripleFault).is_err());
    }

    #[test]
    fn test_handle_vm_exit_hlt() {
        assert!(handle_vm_exit(VmExitReason::Hlt).is_ok());
    }
}