// veridian_kernel/arch/x86_64/gdt.rs
// Global Descriptor Table

3use lazy_static::lazy_static;
4use x86_64::{
5 structures::{
6 gdt::{Descriptor, GlobalDescriptorTable, SegmentSelector},
7 tss::TaskStateSegment,
8 },
9 VirtAddr,
10};
11
12pub const DOUBLE_FAULT_IST_INDEX: u16 = 0;
13
14lazy_static! {
15 static ref TSS: TaskStateSegment = {
16 let mut tss = TaskStateSegment::new();
17
18 // Set up the kernel stack for privilege level 0
19 // This is used when transitioning from user mode to kernel mode.
20 // Must be 16-byte aligned for the x86_64 ABI (movaps et al.).
21 tss.privilege_stack_table[0] = {
22 const STACK_SIZE: usize = 4096 * 5;
23 #[repr(align(16))]
24 #[allow(dead_code)] // Alignment wrapper -- field accessed via raw pointer
25 struct AlignedStack([u8; STACK_SIZE]);
26 static mut KERNEL_STACK: AlignedStack = AlignedStack([0; STACK_SIZE]);
27
28 let stack_ptr = &raw const KERNEL_STACK;
29 let stack_start = VirtAddr::from_ptr(stack_ptr);
30 stack_start + STACK_SIZE as u64
31 };
32
33 // Set up the double fault stack (16-byte aligned)
34 tss.interrupt_stack_table[DOUBLE_FAULT_IST_INDEX as usize] = {
35 const STACK_SIZE: usize = 4096 * 5;
36 #[repr(align(16))]
37 #[allow(dead_code)] // Alignment wrapper -- field accessed via raw pointer
38 struct AlignedStack([u8; STACK_SIZE]);
39 static mut STACK: AlignedStack = AlignedStack([0; STACK_SIZE]);
40
41 let stack_ptr = &raw const STACK;
42 let stack_start = VirtAddr::from_ptr(stack_ptr);
43 stack_start + STACK_SIZE as u64
44 };
45 tss
46 };
47}
48
49lazy_static! {
50 static ref GDT: (GlobalDescriptorTable, Selectors) = {
51 let mut gdt = GlobalDescriptorTable::new();
52 let code_selector = gdt.append(Descriptor::kernel_code_segment()); // 0x08
53 let data_selector = gdt.append(Descriptor::kernel_data_segment()); // 0x10
54 let tss_selector = gdt.append(Descriptor::tss_segment(&TSS)); // 0x18 (2 entries)
55 let user_data_selector = gdt.append(Descriptor::user_data_segment()); // 0x28 (+ RPL 3 = 0x2B)
56 let user_code_selector = gdt.append(Descriptor::user_code_segment()); // 0x30 (+ RPL 3 = 0x33)
57 (
58 gdt,
59 Selectors {
60 code_selector,
61 data_selector,
62 tss_selector,
63 user_data_selector,
64 user_code_selector,
65 },
66 )
67 };
68}
69
/// GDT segment selectors for kernel and user mode.
///
/// Layout:
/// - 0x00: Null descriptor
/// - 0x08: Kernel code segment (Ring 0)
/// - 0x10: Kernel data segment (Ring 0)
/// - 0x18: TSS (occupies 2 entries, 0x18-0x20)
/// - 0x28: User data segment (Ring 3, selector 0x2B with RPL)
/// - 0x30: User code segment (Ring 3, selector 0x33 with RPL)
///
/// The user data/code order matches SYSRET expectations:
/// SYSRET computes SS = STAR[63:48]+8, CS = STAR[63:48]+16.
pub struct Selectors {
    /// Kernel code segment selector (loaded into CS by `init`).
    pub code_selector: SegmentSelector,
    /// Kernel data segment selector (loaded into DS by `init`).
    pub data_selector: SegmentSelector,
    /// TSS selector (loaded via `load_tss` in `init`).
    pub tss_selector: SegmentSelector,
    /// User data segment selector (Ring 3; 0x2B with RPL applied).
    pub user_data_selector: SegmentSelector,
    /// User code segment selector (Ring 3; 0x33 with RPL applied).
    pub user_code_selector: SegmentSelector,
}
89
90pub fn init() {
91 use x86_64::instructions::{
92 segmentation::{Segment, CS, DS},
93 tables::load_tss,
94 };
95
96 GDT.0.load();
97 // SAFETY: After loading the GDT, segment registers must be updated to reference
98 // the new descriptors. CS must be reloaded via a far return/jump. DS and TSS
99 // are loaded directly. The selectors come from GDT.1 which was computed
100 // from the same GDT we just loaded, so they reference valid descriptors.
101 unsafe {
102 CS::set_reg(GDT.1.code_selector);
103 DS::set_reg(GDT.1.data_selector);
104 load_tss(GDT.1.tss_selector);
105 }
106}
107
/// Returns a reference to the GDT selectors (kernel and user mode).
///
/// Must only be called after `init()` has been called. The lazy_static
/// ensures the GDT is initialized on first access, but the descriptors are
/// only in effect once `init()` has actually loaded the table.
pub fn selectors() -> &'static Selectors {
    &GDT.1
}
115
/// Update the kernel stack pointer in the TSS (RSP0).
///
/// Called during context switch to set the stack used for Ring 3 -> Ring 0
/// transitions (interrupts, syscalls). Must be called with interrupts disabled.
///
/// # Safety
///
/// The TSS is a static initialized during boot. Modifying
/// `privilege_stack_table[0]` via raw pointer is safe because this is only
/// called from the scheduler with interrupts disabled, ensuring no concurrent
/// access.
///
/// NOTE(review): casting the shared `&'static TaskStateSegment` handed out
/// by `lazy_static` to `*mut` and writing through it is undefined behavior
/// under Rust's aliasing rules (mutation behind a shared reference without
/// `UnsafeCell`), even if it works on current compilers. Consider wrapping
/// the TSS in an `UnsafeCell`-based container instead -- TODO confirm and
/// restructure together with the `TSS` declaration.
pub fn set_kernel_stack(stack_top: u64) {
    unsafe {
        // Cast away the shared reference from lazy_static to obtain a
        // mutable pointer (see NOTE above regarding aliasing rules).
        let tss_ptr = &*TSS as *const TaskStateSegment as *mut TaskStateSegment;
        // The CPU reads RSP0 from here on the next privilege-level switch.
        (*tss_ptr).privilege_stack_table[0] = VirtAddr::new(stack_top);
    }
}
133
134/// Read the current kernel stack pointer from the TSS (RSP0).
135pub fn get_kernel_stack() -> u64 {
136 TSS.privilege_stack_table[0].as_u64()
137}