1#[cfg(feature = "alloc")]
5extern crate alloc;
6
7use super::VmError;
8
/// Number of 8-byte entries in one 4 KiB EPT paging structure (4096 / 8).
const EPT_ENTRIES_PER_TABLE: usize = 512;
/// Size in bytes of one guest page mapped by a leaf EPT entry.
const PAGE_SIZE: u64 = 4096;
/// Mask extracting a 9-bit paging-structure index from a shifted guest-physical address.
#[cfg(target_arch = "x86_64")]
const INDEX_MASK: u64 = 0x1FF;
13
/// Read/write/execute access rights occupying bits 0–2 of an EPT entry.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub struct EptPermissions {
    bits: u8,
}

impl EptPermissions {
    /// No access.
    pub const NONE: Self = Self { bits: 0b000 };
    /// Read access.
    pub const READ: Self = Self { bits: 0b001 };
    /// Write access.
    pub const WRITE: Self = Self { bits: 0b010 };
    /// Execute access.
    pub const EXECUTE: Self = Self { bits: 0b100 };
    /// Read and write access.
    pub const READ_WRITE: Self = Self { bits: 0b011 };
    /// Read, write, and execute access.
    pub const ALL: Self = Self { bits: 0b111 };

    /// True when every bit of `other` is also set in `self`.
    fn contains(self, other: Self) -> bool {
        self.bits & other.bits == other.bits
    }

    /// Whether the read bit (bit 0) is set.
    pub fn read(self) -> bool {
        self.contains(Self::READ)
    }
    /// Whether the write bit (bit 1) is set.
    pub fn write(self) -> bool {
        self.contains(Self::WRITE)
    }
    /// Whether the execute bit (bit 2) is set.
    pub fn execute(self) -> bool {
        self.contains(Self::EXECUTE)
    }
    /// The permission bits widened for OR-ing into a 64-bit entry.
    pub fn as_bits(self) -> u64 {
        u64::from(self.bits)
    }
    /// Builds permissions from raw bits; anything above bit 2 is discarded.
    pub fn from_bits(bits: u8) -> Self {
        Self { bits: bits & 0b111 }
    }
}
43
/// Guest memory cache type stored in bits 5:3 of a leaf EPT entry
/// (see `EptEntry::new_page`). The discriminants follow the VMX EPT
/// memory-type encoding, which is why values 2 and 3 are skipped.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
#[repr(u8)]
pub enum EptMemoryType {
    /// UC — strongly uncacheable (MMIO, device memory).
    Uncacheable = 0,
    /// WC — write combining (e.g. framebuffers).
    WriteCombining = 1,
    /// WT — write through.
    WriteThrough = 4,
    /// WP — write protected.
    WriteProtected = 5,
    /// WB — write back; the normal choice for RAM.
    WriteBack = 6,
}
53
54#[derive(Clone, Copy)]
55#[repr(transparent)]
56pub struct EptEntry(u64);
57
58impl EptEntry {
59 pub const fn empty() -> Self {
60 Self(0)
61 }
62
63 pub fn new_table(table_phys: u64, perms: EptPermissions) -> Self {
64 Self((table_phys & 0x000F_FFFF_FFFF_F000) | perms.as_bits())
65 }
66
67 pub fn new_page(host_phys: u64, perms: EptPermissions, mem_type: EptMemoryType) -> Self {
68 Self(
69 (host_phys & 0x000F_FFFF_FFFF_F000)
70 | ((mem_type as u64) << 3)
71 | (1 << 6)
72 | perms.as_bits(),
73 )
74 }
75
76 pub fn is_present(self) -> bool {
77 self.0 & 0x7 != 0
78 }
79 pub fn address(self) -> u64 {
80 self.0 & 0x000F_FFFF_FFFF_F000
81 }
82 pub fn permissions(self) -> EptPermissions {
83 EptPermissions::from_bits((self.0 & 0x7) as u8)
84 }
85 pub fn raw(self) -> u64 {
86 self.0
87 }
88 pub fn clear(&mut self) {
89 self.0 = 0;
90 }
91}
92
93impl core::fmt::Debug for EptEntry {
94 fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result {
95 write!(
96 f,
97 "EptEntry(0x{:016x} addr=0x{:x} R={} W={} X={})",
98 self.0,
99 self.address(),
100 u8::from(self.permissions().read()),
101 u8::from(self.permissions().write()),
102 u8::from(self.permissions().execute())
103 )
104 }
105}
106
107#[repr(C, align(4096))]
108pub struct EptTable {
109 entries: [EptEntry; EPT_ENTRIES_PER_TABLE],
110}
111
112impl EptTable {
113 pub const fn new() -> Self {
114 Self {
115 entries: [EptEntry::empty(); EPT_ENTRIES_PER_TABLE],
116 }
117 }
118 pub fn entry(&self, index: usize) -> &EptEntry {
119 &self.entries[index]
120 }
121 pub fn entry_mut(&mut self, index: usize) -> &mut EptEntry {
122 &mut self.entries[index]
123 }
124}
125
126impl Default for EptTable {
127 fn default() -> Self {
128 Self::new()
129 }
130}
131
/// Owner of a four-level EPT hierarchy rooted at a single PML4 frame.
pub struct EptManager {
    // Frame number of the root (PML4) table; its physical address is
    // frame * FRAME_SIZE (see `pml4_physical_address`).
    pml4_frame: crate::mm::FrameNumber,
    // Count of 4 KiB guest pages mapped through `map_page*` minus
    // pages removed via `unmap_page`.
    mapped_pages: u64,
}
136
137impl EptManager {
138 #[cfg(target_arch = "x86_64")]
139 pub fn new() -> Result<Self, VmError> {
140 use crate::mm::frame_allocator::FRAME_ALLOCATOR;
141 let frame = {
142 let allocator = FRAME_ALLOCATOR.lock();
143 allocator
144 .allocate_frames(1, None)
145 .map_err(|_| VmError::EptMappingFailed)?
146 };
147 let phys = frame.as_u64() * crate::mm::FRAME_SIZE as u64;
148 let virt = crate::mm::phys_to_virt_addr(phys);
149 unsafe {
151 core::ptr::write_bytes(virt as *mut u8, 0, crate::mm::FRAME_SIZE);
152 }
153 Ok(Self {
154 pml4_frame: frame,
155 mapped_pages: 0,
156 })
157 }
158
159 #[cfg(not(target_arch = "x86_64"))]
160 pub fn new() -> Result<Self, VmError> {
161 Err(VmError::VmxNotSupported)
162 }
163
164 #[cfg(target_arch = "x86_64")]
165 pub fn map_page(
166 &mut self,
167 guest_phys: u64,
168 host_phys: u64,
169 perms: EptPermissions,
170 ) -> Result<(), VmError> {
171 self.map_page_with_type(guest_phys, host_phys, perms, EptMemoryType::WriteBack)
172 }
173
174 #[cfg(not(target_arch = "x86_64"))]
175 pub fn map_page(&mut self, _gp: u64, _hp: u64, _p: EptPermissions) -> Result<(), VmError> {
176 Err(VmError::VmxNotSupported)
177 }
178
179 #[cfg(target_arch = "x86_64")]
180 pub fn map_page_with_type(
181 &mut self,
182 guest_phys: u64,
183 host_phys: u64,
184 perms: EptPermissions,
185 mem_type: EptMemoryType,
186 ) -> Result<(), VmError> {
187 use crate::mm::frame_allocator::FRAME_ALLOCATOR;
188 let pml4_idx = ((guest_phys >> 39) & INDEX_MASK) as usize;
189 let pdpt_idx = ((guest_phys >> 30) & INDEX_MASK) as usize;
190 let pd_idx = ((guest_phys >> 21) & INDEX_MASK) as usize;
191 let pt_idx = ((guest_phys >> 12) & INDEX_MASK) as usize;
192
193 let pml4_phys = self.pml4_frame.as_u64() * crate::mm::FRAME_SIZE as u64;
194 let pml4_virt = crate::mm::phys_to_virt_addr(pml4_phys);
195 let pml4_table = unsafe { &mut *(pml4_virt as *mut EptTable) };
197
198 if !pml4_table.entry(pml4_idx).is_present() {
199 let f = {
200 let a = FRAME_ALLOCATOR.lock();
201 a.allocate_frames(1, None)
202 .map_err(|_| VmError::EptMappingFailed)?
203 };
204 let p = f.as_u64() * crate::mm::FRAME_SIZE as u64;
205 let v = crate::mm::phys_to_virt_addr(p);
206 unsafe {
209 core::ptr::write_bytes(v as *mut u8, 0, crate::mm::FRAME_SIZE);
210 }
211 *pml4_table.entry_mut(pml4_idx) = EptEntry::new_table(p, EptPermissions::ALL);
212 }
213
214 let pdpt_phys = pml4_table.entry(pml4_idx).address();
215 let pdpt_table =
218 unsafe { &mut *(crate::mm::phys_to_virt_addr(pdpt_phys) as *mut EptTable) };
219
220 if !pdpt_table.entry(pdpt_idx).is_present() {
221 let f = {
222 let a = FRAME_ALLOCATOR.lock();
223 a.allocate_frames(1, None)
224 .map_err(|_| VmError::EptMappingFailed)?
225 };
226 let p = f.as_u64() * crate::mm::FRAME_SIZE as u64;
227 let v = crate::mm::phys_to_virt_addr(p);
228 unsafe {
231 core::ptr::write_bytes(v as *mut u8, 0, crate::mm::FRAME_SIZE);
232 }
233 *pdpt_table.entry_mut(pdpt_idx) = EptEntry::new_table(p, EptPermissions::ALL);
234 }
235
236 let pd_phys = pdpt_table.entry(pdpt_idx).address();
237 let pd_table = unsafe { &mut *(crate::mm::phys_to_virt_addr(pd_phys) as *mut EptTable) };
240
241 if !pd_table.entry(pd_idx).is_present() {
242 let f = {
243 let a = FRAME_ALLOCATOR.lock();
244 a.allocate_frames(1, None)
245 .map_err(|_| VmError::EptMappingFailed)?
246 };
247 let p = f.as_u64() * crate::mm::FRAME_SIZE as u64;
248 let v = crate::mm::phys_to_virt_addr(p);
249 unsafe {
252 core::ptr::write_bytes(v as *mut u8, 0, crate::mm::FRAME_SIZE);
253 }
254 *pd_table.entry_mut(pd_idx) = EptEntry::new_table(p, EptPermissions::ALL);
255 }
256
257 let pt_phys = pd_table.entry(pd_idx).address();
258 let pt_table = unsafe { &mut *(crate::mm::phys_to_virt_addr(pt_phys) as *mut EptTable) };
261 *pt_table.entry_mut(pt_idx) = EptEntry::new_page(host_phys, perms, mem_type);
262 self.mapped_pages += 1;
263 Ok(())
264 }
265
266 #[cfg(not(target_arch = "x86_64"))]
267 pub fn map_page_with_type(
268 &mut self,
269 _gp: u64,
270 _hp: u64,
271 _p: EptPermissions,
272 _mt: EptMemoryType,
273 ) -> Result<(), VmError> {
274 Err(VmError::VmxNotSupported)
275 }
276
277 #[cfg(target_arch = "x86_64")]
278 pub fn unmap_page(&mut self, guest_phys: u64) -> Result<(), VmError> {
279 let pml4_idx = ((guest_phys >> 39) & INDEX_MASK) as usize;
280 let pdpt_idx = ((guest_phys >> 30) & INDEX_MASK) as usize;
281 let pd_idx = ((guest_phys >> 21) & INDEX_MASK) as usize;
282 let pt_idx = ((guest_phys >> 12) & INDEX_MASK) as usize;
283
284 let pml4_phys = self.pml4_frame.as_u64() * crate::mm::FRAME_SIZE as u64;
285 let pml4_table = unsafe { &*(crate::mm::phys_to_virt_addr(pml4_phys) as *const EptTable) };
288 if !pml4_table.entry(pml4_idx).is_present() {
289 return Ok(());
290 }
291
292 let pdpt_table = unsafe {
295 &*(crate::mm::phys_to_virt_addr(pml4_table.entry(pml4_idx).address())
296 as *const EptTable)
297 };
298 if !pdpt_table.entry(pdpt_idx).is_present() {
299 return Ok(());
300 }
301
302 let pd_table = unsafe {
305 &*(crate::mm::phys_to_virt_addr(pdpt_table.entry(pdpt_idx).address())
306 as *const EptTable)
307 };
308 if !pd_table.entry(pd_idx).is_present() {
309 return Ok(());
310 }
311
312 let pt_table = unsafe {
315 &mut *(crate::mm::phys_to_virt_addr(pd_table.entry(pd_idx).address()) as *mut EptTable)
316 };
317 pt_table.entry_mut(pt_idx).clear();
318 if self.mapped_pages > 0 {
319 self.mapped_pages -= 1;
320 }
321 Ok(())
322 }
323
324 #[cfg(not(target_arch = "x86_64"))]
325 pub fn unmap_page(&mut self, _gp: u64) -> Result<(), VmError> {
326 Err(VmError::VmxNotSupported)
327 }
328
329 pub fn identity_map_range(
330 &mut self,
331 start: u64,
332 end: u64,
333 perms: EptPermissions,
334 ) -> Result<(), VmError> {
335 let mut addr = start & !0xFFF;
336 while addr < end {
337 self.map_page(addr, addr, perms)?;
338 addr += PAGE_SIZE;
339 }
340 Ok(())
341 }
342
343 pub fn eptp(&self) -> u64 {
344 let pml4_phys = self.pml4_frame.as_u64() * crate::mm::FRAME_SIZE as u64;
345 (pml4_phys & 0x000F_FFFF_FFFF_F000) | (3 << 3) | 6
346 }
347
348 pub fn mapped_page_count(&self) -> u64 {
349 self.mapped_pages
350 }
351 pub fn pml4_physical_address(&self) -> u64 {
352 self.pml4_frame.as_u64() * crate::mm::FRAME_SIZE as u64
353 }
354}
355
/// Decoded form of an EPT-violation VM-exit qualification.
#[derive(Debug, Clone, Copy)]
pub struct EptViolationInfo {
    /// The faulting access was a read.
    pub read: bool,
    /// The faulting access was a write.
    pub write: bool,
    /// The faulting access was an instruction fetch.
    pub execute: bool,
    /// The translation's EPT entry permitted reads.
    pub ept_readable: bool,
    /// The translation's EPT entry permitted writes.
    pub ept_writable: bool,
    /// The translation's EPT entry permitted execution.
    pub ept_executable: bool,
    /// Guest-physical address that triggered the violation.
    pub guest_physical_addr: u64,
    /// Guest linear address, when the qualification marks it valid (bit 7).
    pub guest_linear_addr: Option<u64>,
}

impl EptViolationInfo {
    /// Decodes the exit qualification bits of an EPT violation.
    ///
    /// Bits 0–2 describe the access, bits 3–5 the entry's permissions;
    /// `guest_lin` is kept only when bit 7 reports it as valid.
    pub fn from_exit_qualification(qualification: u64, guest_phys: u64, guest_lin: u64) -> Self {
        let bit = |n: u64| (qualification >> n) & 1 != 0;
        let linear = if bit(7) { Some(guest_lin) } else { None };
        Self {
            read: bit(0),
            write: bit(1),
            execute: bit(2),
            ept_readable: bit(3),
            ept_writable: bit(4),
            ept_executable: bit(5),
            guest_physical_addr: guest_phys,
            guest_linear_addr: linear,
        }
    }
}
386
387pub fn handle_ept_violation(info: &EptViolationInfo) -> Result<(), VmError> {
388 crate::println!(
389 " [ept] EPT violation at guest_phys=0x{:x} (R={} W={} X={})",
390 info.guest_physical_addr,
391 info.read as u8,
392 info.write as u8,
393 info.execute as u8
394 );
395 Err(VmError::EptMappingFailed)
396}
397
#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn test_ept_permissions() {
        assert!(EptPermissions::READ.read());
        assert!(!EptPermissions::READ.write());
        assert!(EptPermissions::ALL.read());
        assert!(EptPermissions::ALL.write());
        assert!(EptPermissions::ALL.execute());
        assert_eq!(EptPermissions::ALL.as_bits(), 7);
        assert_eq!(EptPermissions::NONE.as_bits(), 0);
    }

    #[test]
    fn test_ept_permissions_from_bits_masks_high_bits() {
        // Only bits 0-2 are meaningful; anything above must be discarded.
        assert_eq!(EptPermissions::from_bits(0xFF).as_bits(), 7);
        assert_eq!(EptPermissions::from_bits(0x08).as_bits(), 0);
        assert_eq!(EptPermissions::from_bits(3), EptPermissions::READ_WRITE);
    }

    #[test]
    fn test_ept_entry_empty() {
        let e = EptEntry::empty();
        assert!(!e.is_present());
        assert_eq!(e.address(), 0);
    }

    #[test]
    fn test_ept_entry_table() {
        let e = EptEntry::new_table(0x1000_0000, EptPermissions::ALL);
        assert!(e.is_present());
        assert_eq!(e.address(), 0x1000_0000);
    }

    #[test]
    fn test_ept_entry_page() {
        let e = EptEntry::new_page(0x2000_0000, EptPermissions::READ, EptMemoryType::WriteBack);
        assert!(e.is_present());
        assert_eq!(e.address(), 0x2000_0000);
        assert!(e.permissions().read());
        assert!(!e.permissions().write());
    }

    #[test]
    fn test_ept_entry_page_encodes_memory_type() {
        // The cache type lives in bits 5:3 of a leaf entry.
        let uc = EptEntry::new_page(0x2000_0000, EptPermissions::ALL, EptMemoryType::Uncacheable);
        assert_eq!((uc.raw() >> 3) & 0x7, 0);
        let wb = EptEntry::new_page(0x2000_0000, EptPermissions::ALL, EptMemoryType::WriteBack);
        assert_eq!((wb.raw() >> 3) & 0x7, 6);
        // Bit 6 is set on every leaf entry.
        assert_eq!((wb.raw() >> 6) & 1, 1);
    }

    #[test]
    fn test_ept_entry_clear() {
        let mut e = EptEntry::new_table(0x3000_0000, EptPermissions::ALL);
        e.clear();
        assert!(!e.is_present());
    }

    #[test]
    fn test_ept_violation_info() {
        let info = EptViolationInfo::from_exit_qualification(0x81, 0x1000, 0x7FFF_0000);
        assert!(info.read);
        assert!(!info.write);
        assert!(info.guest_linear_addr.is_some());
    }

    #[test]
    fn test_ept_violation_info_without_linear_addr() {
        // Bit 7 clear: the linear address must be discarded.
        let info = EptViolationInfo::from_exit_qualification(0x2, 0x1000, 0x7FFF_0000);
        assert!(info.write);
        assert!(!info.read);
        assert!(info.guest_linear_addr.is_none());
    }
}