//! Nested virtualization (VMX) support: shadow VMCS emulation and
//! L0/L1/L2 nesting-level tracking.
//! Path: veridian_kernel/virt/hypervisor/nested.rs
#[cfg(feature = "alloc")]
6use alloc::collections::BTreeMap;
7
8use super::GuestRegisters;
9use crate::virt::{vmx::VmcsFields, VmError};
10
/// Software "shadow" copy of an L1 guest's VMCS.
///
/// L1 VMREAD/VMWRITE accesses are emulated against this in-memory map
/// instead of a hardware VMCS region.
#[cfg(feature = "alloc")]
pub struct ShadowVmcs {
    /// Emulated VMCS field storage, keyed by VMCS field encoding.
    fields: BTreeMap<u32, u64>,
    /// Whether this shadow VMCS is currently backing L2 execution.
    active: bool,
    /// VMCS link pointer; all-ones is the VMX "no shadow VMCS linked" sentinel.
    link_pointer: u64,
}
25
#[cfg(feature = "alloc")]
impl Default for ShadowVmcs {
    /// Equivalent to [`ShadowVmcs::new`]: empty field map, inactive,
    /// link pointer at the all-ones "invalid" value (so a derived
    /// `Default`, which would zero it, is not usable here).
    fn default() -> Self {
        ShadowVmcs::new()
    }
}
32
// NOTE: the struct itself is gated on `alloc` (it stores a BTreeMap), so
// the impl must be gated too or a no-`alloc` build fails to compile.
#[cfg(feature = "alloc")]
impl ShadowVmcs {
    /// VMX "no shadow VMCS linked" sentinel for the VMCS link pointer
    /// (all ones, i.e. 0xFFFF_FFFF_FFFF_FFFF).
    const INVALID_LINK_POINTER: u64 = u64::MAX;

    /// Creates an empty, inactive shadow VMCS with an invalid link pointer.
    pub fn new() -> Self {
        Self {
            fields: BTreeMap::new(),
            active: false,
            link_pointer: Self::INVALID_LINK_POINTER,
        }
    }

    /// Stores `value` for the VMCS field encoding `field`, overwriting any
    /// previous value.
    pub fn write_field(&mut self, field: u32, value: u64) {
        self.fields.insert(field, value);
    }

    /// Returns the stored value for `field`, or `None` if it was never written.
    pub fn read_field(&self, field: u32) -> Option<u64> {
        self.fields.get(&field).copied()
    }

    /// Marks this shadow VMCS active and records the supplied link pointer.
    pub fn activate(&mut self, link_pointer: u64) {
        self.active = true;
        self.link_pointer = link_pointer;
    }

    /// Marks this shadow VMCS inactive and resets the link pointer to the
    /// invalid sentinel. Stored fields are retained.
    pub fn deactivate(&mut self) {
        self.active = false;
        self.link_pointer = Self::INVALID_LINK_POINTER;
    }

    /// Whether this shadow VMCS is currently active.
    pub fn is_active(&self) -> bool {
        self.active
    }

    /// Current VMCS link pointer value.
    pub fn link_pointer(&self) -> u64 {
        self.link_pointer
    }

    /// Number of fields that have been written at least once.
    pub fn field_count(&self) -> usize {
        self.fields.len()
    }

    /// Drops all stored fields and deactivates the shadow VMCS.
    pub fn clear(&mut self) {
        self.fields.clear();
        self.active = false;
        self.link_pointer = Self::INVALID_LINK_POINTER;
    }
}
83
/// Virtualization nesting depth currently executing.
#[derive(Debug, Clone, Copy, PartialEq, Eq, Default)]
pub enum NestingLevel {
    /// Bare-metal hypervisor level (this kernel); the default.
    #[default]
    L0,
    /// Guest hypervisor running under L0.
    L1,
    /// Nested guest running under the L1 hypervisor.
    L2,
}
95
/// Classified reason for an exit from L2, as seen by the L0 hypervisor.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum NestedExitReason {
    /// Guest executed VMCALL.
    Vmcall,
    /// EPT violation (nested page-table fault).
    EptViolation,
    /// I/O instruction access.
    IoInstruction,
    /// MSR access.
    MsrAccess,
    /// Guest executed CPUID.
    Cpuid,
    /// Some other VMX instruction (VMREAD/VMWRITE/VMLAUNCH/... — exact set
    /// determined by the caller; not distinguished here).
    VmxInstruction,
    /// External (hardware) interrupt arrived during L2 execution.
    ExternalInterrupt,
    /// Guest triple-faulted.
    TripleFault,
    /// Guest executed HLT.
    Hlt,
    /// Unclassified exit carrying the raw reason code.
    Other(u32),
}
120
/// Policy applied when L1 performs VMREAD/VMWRITE on a given VMCS field.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum FieldForwardPolicy {
    /// Reads and writes are allowed against the shadow VMCS.
    Passthrough,
    /// Field is emulated by L0. NOTE(review): in the visible handlers this
    /// is treated identically to `Passthrough` — confirm whether a
    /// distinction is still intended.
    Emulated,
    /// L1 may read the field; writes are rejected.
    ReadOnly,
    /// Field is invisible to L1: both reads and writes are rejected.
    Hidden,
}
134
/// Coordinates nested VMX: tracks the nesting level, emulates L1's VMCS
/// through a shadow copy, and enforces per-field access policies.
#[cfg(feature = "alloc")]
pub struct NestedVirtController {
    /// Current nesting level (L0/L1/L2).
    level: NestingLevel,
    /// Shadow VMCS backing emulation of L1's VMREAD/VMWRITE.
    pub(crate) shadow_vmcs: ShadowVmcs,
    /// L1 register state captured on entry to L2; returned on exit.
    l1_saved_state: GuestRegisters,
    /// Per-field access policy; fields absent from the map default to
    /// `Passthrough` in the handlers.
    field_policies: BTreeMap<u32, FieldForwardPolicy>,
    /// Whether nested VMX has been enabled for L1 (see `enable_nested_vmx`).
    nested_vmx_enabled: bool,
}
149
#[cfg(feature = "alloc")]
impl Default for NestedVirtController {
    /// Equivalent to [`NestedVirtController::new`]: level L0, nested VMX
    /// disabled, and the standard field policies installed.
    fn default() -> Self {
        NestedVirtController::new()
    }
}
156
// NOTE: the struct is gated on `alloc` (BTreeMap field), so the impl must
// carry the same gate or a no-`alloc` build fails to compile.
#[cfg(feature = "alloc")]
impl NestedVirtController {
    /// Creates a controller at level L0 with nested VMX disabled and the
    /// default per-field forwarding policies installed.
    pub fn new() -> Self {
        let mut policies = BTreeMap::new();
        // Instruction pointer / stack / flags are fully emulated for L1.
        policies.insert(VmcsFields::GUEST_RIP, FieldForwardPolicy::Emulated);
        policies.insert(VmcsFields::GUEST_RSP, FieldForwardPolicy::Emulated);
        policies.insert(VmcsFields::GUEST_RFLAGS, FieldForwardPolicy::Emulated);
        // Guest control registers pass through to the shadow VMCS.
        policies.insert(VmcsFields::GUEST_CR0, FieldForwardPolicy::Passthrough);
        policies.insert(VmcsFields::GUEST_CR3, FieldForwardPolicy::Passthrough);
        policies.insert(VmcsFields::GUEST_CR4, FieldForwardPolicy::Passthrough);
        // L1 may inspect the exit reason but must never write it.
        policies.insert(VmcsFields::VM_EXIT_REASON, FieldForwardPolicy::ReadOnly);
        // L0 host state must stay invisible to L1.
        policies.insert(VmcsFields::HOST_RIP, FieldForwardPolicy::Hidden);
        policies.insert(VmcsFields::HOST_RSP, FieldForwardPolicy::Hidden);

        Self {
            level: NestingLevel::L0,
            shadow_vmcs: ShadowVmcs::new(),
            l1_saved_state: GuestRegisters::default(),
            field_policies: policies,
            nested_vmx_enabled: false,
        }
    }

    /// Grants L1 the ability to use nested VMX (required by [`Self::enter_l2`]).
    pub fn enable_nested_vmx(&mut self) {
        self.nested_vmx_enabled = true;
    }

    /// Emulates an L1 VMWRITE to `field`.
    ///
    /// # Errors
    /// Returns `VmError::VmcsFieldError` when the field's policy is
    /// `ReadOnly` or `Hidden`. Unknown fields default to `Passthrough`.
    pub fn handle_l1_vmwrite(&mut self, field: u32, value: u64) -> Result<(), VmError> {
        let policy = self
            .field_policies
            .get(&field)
            .copied()
            .unwrap_or(FieldForwardPolicy::Passthrough);

        match policy {
            FieldForwardPolicy::Passthrough | FieldForwardPolicy::Emulated => {
                self.shadow_vmcs.write_field(field, value);
                Ok(())
            }
            FieldForwardPolicy::ReadOnly | FieldForwardPolicy::Hidden => {
                Err(VmError::VmcsFieldError)
            }
        }
    }

    /// Emulates an L1 VMREAD of `field`.
    ///
    /// # Errors
    /// Returns `VmError::VmcsFieldError` when the field is `Hidden` or has
    /// never been written to the shadow VMCS.
    pub fn handle_l1_vmread(&self, field: u32) -> Result<u64, VmError> {
        let policy = self
            .field_policies
            .get(&field)
            .copied()
            .unwrap_or(FieldForwardPolicy::Passthrough);

        match policy {
            FieldForwardPolicy::Hidden => Err(VmError::VmcsFieldError),
            // Passthrough, Emulated, and ReadOnly all read the shadow copy.
            _ => self
                .shadow_vmcs
                .read_field(field)
                .ok_or(VmError::VmcsFieldError),
        }
    }

    /// Transitions into L2 execution on behalf of the L1 hypervisor,
    /// saving `l1_state` for restoration when L2 exits.
    ///
    /// # Errors
    /// - `VmError::VmxNotSupported` if nested VMX was never enabled.
    /// - `VmError::InvalidGuestState` if L1 never programmed `GUEST_RIP`.
    pub fn enter_l2(&mut self, l1_state: &GuestRegisters) -> Result<(), VmError> {
        if !self.nested_vmx_enabled {
            return Err(VmError::VmxNotSupported);
        }
        // Entering L2 directly from L0 implicitly promotes us to L1 first.
        // (Collapsed from the original redundant `!= L1` / `== L0` nesting;
        // behavior is identical.)
        if self.level == NestingLevel::L0 {
            self.level = NestingLevel::L1;
        }

        self.l1_saved_state = *l1_state;

        // L1 must have written at least the guest RIP before launch.
        if self.shadow_vmcs.read_field(VmcsFields::GUEST_RIP).is_none() {
            return Err(VmError::InvalidGuestState);
        }

        self.level = NestingLevel::L2;
        // NOTE(review): activated with link pointer 0 rather than a real
        // VMCS address — confirm this placeholder is intentional.
        self.shadow_vmcs.activate(0);
        Ok(())
    }

    /// Handles an exit from L2: records the basic exit reason into the
    /// shadow VMCS, drops back to L1, and returns the saved L1 state.
    ///
    /// # Errors
    /// Returns `VmError::InvalidVmState` when not currently at L2.
    pub fn exit_l2(&mut self, exit_reason: NestedExitReason) -> Result<GuestRegisters, VmError> {
        if self.level != NestingLevel::L2 {
            return Err(VmError::InvalidVmState);
        }

        // Basic exit reason encodings (Intel SDM Vol. 3, Appendix C).
        let reason_code: u32 = match exit_reason {
            NestedExitReason::Vmcall => 18,
            NestedExitReason::EptViolation => 48,
            NestedExitReason::IoInstruction => 30,
            NestedExitReason::MsrAccess => 31,
            NestedExitReason::Cpuid => 10,
            // NOTE(review): 18 is VMCALL's code, so VmxInstruction is
            // indistinguishable from Vmcall as reported to L1 — the SDM
            // assigns distinct codes (19-27) per VMX instruction. Confirm
            // whether this aliasing is intended.
            NestedExitReason::VmxInstruction => 18,
            NestedExitReason::ExternalInterrupt => 1,
            NestedExitReason::TripleFault => 2,
            NestedExitReason::Hlt => 12,
            NestedExitReason::Other(code) => code,
        };
        self.shadow_vmcs
            .write_field(VmcsFields::VM_EXIT_REASON, u64::from(reason_code));

        self.level = NestingLevel::L1;
        self.shadow_vmcs.deactivate();

        Ok(self.l1_saved_state)
    }

    /// Current nesting level.
    pub fn nesting_level(&self) -> NestingLevel {
        self.level
    }

    /// Whether nested VMX has been enabled for L1.
    pub fn is_nested_vmx_enabled(&self) -> bool {
        self.nested_vmx_enabled
    }

    /// Decides whether an L2 exit is reflected to the L1 hypervisor
    /// (`true`) or handled directly by L0 (`false`).
    ///
    /// The match is exhaustive (no `_` arm) so adding a new exit reason
    /// forces an explicit routing decision here.
    pub fn should_forward_to_l1(&self, exit_reason: NestedExitReason) -> bool {
        match exit_reason {
            // Hardware interrupts belong to L0.
            NestedExitReason::ExternalInterrupt => false,
            // Everything else is L1's business.
            NestedExitReason::Vmcall
            | NestedExitReason::EptViolation
            | NestedExitReason::IoInstruction
            | NestedExitReason::MsrAccess
            | NestedExitReason::Cpuid
            | NestedExitReason::VmxInstruction
            | NestedExitReason::TripleFault
            | NestedExitReason::Hlt
            | NestedExitReason::Other(_) => true,
        }
    }
}