1#![allow(clippy::slow_vector_initialization, clippy::unnecessary_cast)]
8
9pub mod dynamic;
10pub mod types;
11
12use alloc::{string::String, vec::Vec};
14use core::{mem, slice};
15
16pub use types::*;
17
18use crate::fs::get_vfs;
19
/// Loader for 64-bit little-endian ELF images (x86_64, AArch64, RISC-V).
/// Stateless; all methods take the raw file bytes explicitly.
pub struct ElfLoader;
22
23impl Default for ElfLoader {
24 fn default() -> Self {
25 Self::new()
26 }
27}
28
/// Copy `data` into the address space described by `vas`, starting at
/// `user_vaddr`.
///
/// The destination pages must already be mapped: each page is translated
/// through the VAS page tables and written via the kernel's
/// physical-to-virtual window, so no address-space switch is required.
///
/// # Errors
/// Returns `InvalidArgument` if the VAS has no page-table root or if any
/// touched page is not mapped.
pub fn write_to_user_pages(
    vas: &crate::mm::VirtualAddressSpace,
    user_vaddr: u64,
    data: &[u8],
) -> Result<(), crate::error::KernelError> {
    use crate::mm::{phys_to_virt_addr, vas::create_mapper_from_root_pub, VirtualAddress};

    let pt_root = vas.get_page_table();
    if pt_root == 0 {
        return Err(crate::error::KernelError::InvalidArgument {
            name: "vas",
            value: "page table root is 0",
        });
    }

    let mapper = unsafe { create_mapper_from_root_pub(pt_root) };
    let mut offset = 0usize;

    // Copy page by page: the destination may straddle page boundaries and
    // the backing frames need not be physically contiguous.
    while offset < data.len() {
        let vaddr = user_vaddr + offset as u64;
        let page_vaddr = vaddr & !0xFFF;
        let in_page_offset = (vaddr & 0xFFF) as usize;
        // Bytes writable without crossing into the next page.
        let bytes_in_page = core::cmp::min(0x1000 - in_page_offset, data.len() - offset);

        let (frame, _flags) = mapper
            .translate_page(VirtualAddress(page_vaddr))
            .map_err(|_| crate::error::KernelError::InvalidArgument {
                name: "vaddr",
                value: "page not mapped in VAS",
            })?;

        // Frame number -> physical byte address (4 KiB frames), then into the
        // kernel's direct-map window.
        let phys_base = frame.as_u64() << 12;
        let virt = phys_to_virt_addr(phys_base + in_page_offset as u64);

        unsafe {
            core::ptr::copy_nonoverlapping(
                data.as_ptr().add(offset),
                virt as *mut u8,
                bytes_in_page,
            );
        }

        offset += bytes_in_page;
    }

    Ok(())
}
86
/// Zero `size` bytes at `user_vaddr` in the address space described by
/// `vas` (used to clear a segment's BSS tail).
///
/// Mirrors `write_to_user_pages`: each page is translated through the VAS
/// page tables and cleared via the kernel's physical-to-virtual window.
///
/// # Errors
/// Returns `InvalidArgument` if the VAS has no page-table root or if any
/// touched page is not mapped.
fn zero_user_pages(
    vas: &crate::mm::VirtualAddressSpace,
    user_vaddr: u64,
    size: usize,
) -> Result<(), crate::error::KernelError> {
    use crate::mm::{phys_to_virt_addr, vas::create_mapper_from_root_pub, VirtualAddress};

    let pt_root = vas.get_page_table();
    if pt_root == 0 {
        return Err(crate::error::KernelError::InvalidArgument {
            name: "vas",
            value: "page table root is 0",
        });
    }

    let mapper = unsafe { create_mapper_from_root_pub(pt_root) };
    let mut offset = 0usize;

    // Clear page by page; the region may straddle page boundaries.
    while offset < size {
        let vaddr = user_vaddr + offset as u64;
        let page_vaddr = vaddr & !0xFFF;
        let in_page_offset = (vaddr & 0xFFF) as usize;
        let bytes_in_page = core::cmp::min(0x1000 - in_page_offset, size - offset);

        let (frame, _flags) = mapper
            .translate_page(VirtualAddress(page_vaddr))
            .map_err(|_| crate::error::KernelError::InvalidArgument {
                name: "vaddr",
                value: "page not mapped in VAS",
            })?;

        // Frame number -> physical byte address -> direct-map virtual address.
        let phys_base = frame.as_u64() << 12;
        let virt = phys_to_virt_addr(phys_base + in_page_offset as u64);

        unsafe {
            core::ptr::write_bytes(virt as *mut u8, 0, bytes_in_page);
        }

        offset += bytes_in_page;
    }

    Ok(())
}
137
138impl ElfLoader {
    /// Create a stateless loader instance.
    pub fn new() -> Self {
        Self
    }
143
    /// Load an ELF image into `vas`: map pages for every PT_LOAD segment,
    /// copy the file-backed bytes, zero the BSS tail, and return the entry
    /// point.
    ///
    /// # Errors
    /// `InvalidArgument` if parsing fails or the VAS is unusable,
    /// `UnmappedMemory` if a just-mapped page fails to translate back, and
    /// `PermissionDenied` if the entry point lands on a page that is not
    /// user-accessible and executable.
    pub fn load(
        data: &[u8],
        vas: &mut crate::mm::vas::VirtualAddressSpace,
    ) -> Result<u64, crate::error::KernelError> {
        let loader = Self::new();
        let binary =
            loader
                .parse(data)
                .map_err(|_| crate::error::KernelError::InvalidArgument {
                    name: "elf_data",
                    value: "failed to parse ELF binary",
                })?;

        // Counter used only by the serial debug trace below.
        let mut _load_idx = 0u32;
        for segment in &binary.segments {
            if segment.segment_type == SegmentType::Load {
                // Page-align the segment's memory span: [page_start, page_end).
                let page_start = segment.virtual_addr & !0xFFF;
                let page_end = (segment.virtual_addr + segment.memory_size + 0xFFF) & !0xFFF;
                let num_pages = ((page_end - page_start) / 0x1000) as usize;

                // Low-level serial trace of each PT_LOAD (debugging aid).
                #[cfg(target_arch = "x86_64")]
                unsafe {
                    crate::arch::x86_64::idt::raw_serial_str(b"[ELF] LOAD#");
                    crate::arch::x86_64::idt::raw_serial_hex(_load_idx as u64);
                    crate::arch::x86_64::idt::raw_serial_str(b" va=0x");
                    crate::arch::x86_64::idt::raw_serial_hex(segment.virtual_addr);
                    crate::arch::x86_64::idt::raw_serial_str(b" memsz=0x");
                    crate::arch::x86_64::idt::raw_serial_hex(segment.memory_size);
                    crate::arch::x86_64::idt::raw_serial_str(b" pages=0x");
                    crate::arch::x86_64::idt::raw_serial_hex(num_pages as u64);
                    crate::arch::x86_64::idt::raw_serial_str(b" flags=0x");
                    crate::arch::x86_64::idt::raw_serial_hex(segment.flags as u64);
                    crate::arch::x86_64::idt::raw_serial_str(b"\n");
                }
                _load_idx += 1;

                for i in 0..num_pages {
                    let addr = page_start + (i as u64 * 0x1000);

                    // Translate ELF p_flags (PF_X = 1, PF_W = 2) into page
                    // flags; all loaded pages are user-accessible and present.
                    let mut flags = crate::mm::PageFlags::USER | crate::mm::PageFlags::PRESENT;
                    if (segment.flags & 0x2) != 0 {
                        flags |= crate::mm::PageFlags::WRITABLE;
                    }
                    if (segment.flags & 0x1) == 0 {
                        flags |= crate::mm::PageFlags::NO_EXECUTE;
                    }

                    vas.map_page(addr as usize, flags)?;

                    // Sanity check: the page just mapped must translate.
                    #[cfg(feature = "alloc")]
                    {
                        use crate::mm::{vas::create_mapper_from_root_pub, VirtualAddress};
                        let pt_root = vas.get_page_table();
                        if pt_root != 0 {
                            let mapper = unsafe { create_mapper_from_root_pub(pt_root) };
                            mapper.translate_page(VirtualAddress(addr)).map_err(|_| {
                                crate::error::KernelError::UnmappedMemory {
                                    addr: addr as usize,
                                }
                            })?;
                        }
                    }
                }

                // Copy the file-backed portion of the segment into the VAS.
                if segment.file_size > 0 {
                    let src_slice = &data[segment.file_offset as usize
                        ..(segment.file_offset + segment.file_size) as usize];
                    write_to_user_pages(vas, segment.virtual_addr, src_slice)?;
                }

                // Zero the BSS tail (p_memsz > p_filesz).
                if segment.memory_size > segment.file_size {
                    let bss_size = (segment.memory_size - segment.file_size) as usize;
                    zero_user_pages(vas, segment.virtual_addr + segment.file_size, bss_size)?;
                }
            }
        }

        // Verify the entry point is mapped, user-accessible, and executable
        // before handing it back to the caller.
        #[cfg(feature = "alloc")]
        {
            let pt_root = vas.get_page_table();
            if pt_root == 0 {
                return Err(crate::error::KernelError::InvalidArgument {
                    name: "vas",
                    value: "page table root is 0",
                });
            }

            let mapper = unsafe { crate::mm::vas::create_mapper_from_root_pub(pt_root) };
            use crate::mm::VirtualAddress;

            let entry_page = VirtualAddress(binary.entry_point & !0xFFF);
            let (_, flags) = mapper.translate_page(entry_page).map_err(|_| {
                crate::error::KernelError::UnmappedMemory {
                    addr: binary.entry_point as usize,
                }
            })?;

            use crate::mm::PageFlags;
            let needed = PageFlags::PRESENT | PageFlags::USER;
            if !flags.contains(needed) || flags.contains(PageFlags::NO_EXECUTE) {
                return Err(crate::error::KernelError::PermissionDenied {
                    operation: "execute entry point",
                });
            }
        }

        #[cfg(target_arch = "x86_64")]
        unsafe {
            crate::arch::x86_64::idt::raw_serial_str(b"[ELF] entry=0x");
            crate::arch::x86_64::idt::raw_serial_hex(binary.entry_point);
            crate::arch::x86_64::idt::raw_serial_str(b"\n");
        }

        Ok(binary.entry_point)
    }
282
283 pub fn parse(&self, data: &[u8]) -> Result<ElfBinary, ElfError> {
285 let header = self.parse_header(data)?;
287
288 self.validate_header(&header)?;
290
291 let program_headers = self.parse_program_headers(data, &header)?;
293
294 let (load_base, load_size) = self.calculate_memory_layout(&program_headers)?;
296
297 let interpreter = self.find_interpreter(data, &header, &program_headers)?;
299
300 let dynamic = program_headers
302 .iter()
303 .any(|ph| ph.p_type == ProgramType::Dynamic as u32);
304
305 let mut segments = Vec::new();
307 for ph in &program_headers {
308 let segment_type = match ph.p_type {
309 0 => SegmentType::Null,
310 1 => SegmentType::Load,
311 2 => SegmentType::Dynamic,
312 3 => SegmentType::Interp,
313 4 => SegmentType::Note,
314 5 => SegmentType::Shlib,
315 6 => SegmentType::Phdr,
316 7 => SegmentType::Tls,
317 other => SegmentType::Other(other),
318 };
319
320 segments.push(ElfSegment {
321 segment_type,
322 virtual_addr: ph.p_vaddr,
323 physical_addr: ph.p_paddr,
324 file_offset: ph.p_offset,
325 file_size: ph.p_filesz,
326 memory_size: ph.p_memsz,
327 flags: ph.p_flags,
328 alignment: ph.p_align,
329 });
330 }
331
332 Ok(ElfBinary {
333 entry_point: header.entry,
334 load_base,
335 load_size,
336 segments,
337 interpreter,
338 dynamic,
339 })
340 }
341
342 pub fn load_into_memory(&self, data: &[u8], target_base: u64) -> Result<u64, ElfError> {
344 let header = self.parse_header(data)?;
346
347 self.validate_header(&header)?;
349
350 let program_headers = self.parse_program_headers(data, &header)?;
352
353 for ph in program_headers.iter() {
355 if ph.p_type == ProgramType::Load as u32 {
356 self.load_segment(data, ph, target_base)?;
357 }
358 }
359
360 Ok(header.entry)
362 }
363
364 fn parse_header(&self, data: &[u8]) -> Result<Elf64Header, ElfError> {
366 if data.len() < mem::size_of::<Elf64Header>() {
367 return Err(ElfError::InvalidMagic);
368 }
369
370 let header = unsafe { *(data.as_ptr() as *const Elf64Header) };
375
376 Ok(header)
377 }
378
379 fn validate_header(&self, header: &Elf64Header) -> Result<(), ElfError> {
381 if header.magic != ELF_MAGIC {
383 return Err(ElfError::InvalidMagic);
384 }
385
386 if header.class != ElfClass::Elf64 as u8 {
388 return Err(ElfError::InvalidClass);
389 }
390
391 if header.data != ElfData::LittleEndian as u8 {
393 return Err(ElfError::InvalidData);
394 }
395
396 let elf_type = header.elf_type;
398 if elf_type != ElfType::Executable as u16 && elf_type != ElfType::SharedObject as u16 {
399 return Err(ElfError::InvalidType);
400 }
401
402 let machine = header.machine;
404 match machine {
405 62 => {} 183 => {} 243 => {} _ => return Err(ElfError::UnsupportedMachine),
409 }
410
411 Ok(())
412 }
413
414 fn parse_program_headers(
416 &self,
417 data: &[u8],
418 header: &Elf64Header,
419 ) -> Result<Vec<Elf64ProgramHeader>, ElfError> {
420 let mut headers = Vec::new();
421
422 let ph_offset = header.phoff as usize;
423 let ph_size = header.phentsize as usize;
424 let ph_count = header.phnum as usize;
425
426 for i in 0..ph_count {
427 let offset = ph_offset + (i * ph_size);
428 if offset + ph_size > data.len() {
429 return Err(ElfError::InvalidProgramHeader);
430 }
431
432 let ph = unsafe { *(data[offset..].as_ptr() as *const Elf64ProgramHeader) };
437
438 headers.push(ph);
439 }
440
441 Ok(headers)
442 }
443
444 fn calculate_memory_layout(
446 &self,
447 program_headers: &[Elf64ProgramHeader],
448 ) -> Result<(u64, usize), ElfError> {
449 let mut min_addr = u64::MAX;
450 let mut max_addr = 0u64;
451
452 for ph in program_headers {
453 if ph.p_type == ProgramType::Load as u32 {
454 if ph.p_vaddr < min_addr {
455 min_addr = ph.p_vaddr;
456 }
457 let end_addr = ph.p_vaddr + ph.p_memsz;
458 if end_addr > max_addr {
459 max_addr = end_addr;
460 }
461 }
462 }
463
464 if min_addr == u64::MAX {
465 return Err(ElfError::InvalidProgramHeader);
466 }
467
468 let load_size = (max_addr - min_addr) as usize;
469 Ok((min_addr, load_size))
470 }
471
472 pub fn parse_dynamic_section(
474 &self,
475 data: &[u8],
476 dynamic_offset: u64,
477 dynamic_size: u64,
478 ) -> Result<DynamicInfo, ElfError> {
479 let mut info = DynamicInfo {
480 needed: Vec::new(),
481 soname: None,
482 rpath: None,
483 runpath: None,
484 init: None,
485 fini: None,
486 init_array: None,
487 fini_array: None,
488 hash: None,
489 strtab: None,
490 symtab: None,
491 strsz: 0,
492 syment: 0,
493 pltgot: None,
494 pltrelsz: 0,
495 pltrel: None,
496 jmprel: None,
497 rel: None,
498 relsz: 0,
499 relent: 0,
500 rela: None,
501 relasz: 0,
502 relaent: 0,
503 };
504
505 let entry_size = mem::size_of::<u64>() * 2; let num_entries = (dynamic_size as usize) / entry_size;
507 let offset = dynamic_offset as usize;
508
509 for i in 0..num_entries {
510 let entry_offset = offset + (i * entry_size);
511 if entry_offset + entry_size > data.len() {
512 break;
513 }
514
515 let tag = unsafe { *(data[entry_offset..].as_ptr() as *const i64) };
520 let value = unsafe { *(data[entry_offset + 8..].as_ptr() as *const u64) };
521
522 match tag {
523 0 => break, 1 => { }
528 5 => info.strtab = Some(value), 6 => info.symtab = Some(value), 10 => info.strsz = value as usize, 11 => info.syment = value as usize, 12 => info.init = Some(value), 13 => info.fini = Some(value), 14 => info.soname = Some(String::new()), 15 => info.rpath = Some(String::new()), 17 => info.rel = Some(value), 18 => info.relsz = value as usize, 19 => info.relent = value as usize, 20 => info.pltrel = Some(value), 23 => info.jmprel = Some(value), 7 => info.rela = Some(value), 8 => info.relasz = value as usize, 9 => info.relaent = value as usize, 25 => {
545 info.init_array = Some((value, 0));
547 }
548 27 => {
549 if let Some((addr, _)) = info.init_array {
551 info.init_array = Some((addr, value as usize / 8));
552 }
553 }
554 26 => {
555 info.fini_array = Some((value, 0));
557 }
558 28 => {
559 if let Some((addr, _)) = info.fini_array {
561 info.fini_array = Some((addr, value as usize / 8));
562 }
563 }
564 29 => info.runpath = Some(String::new()), _ => {} }
567 }
568
569 Ok(info)
570 }
571
572 pub fn perform_relocations(
574 &self,
575 base_addr: u64,
576 relocations: &[ElfRelocation],
577 symbols: &[ElfSymbol],
578 ) -> Result<(), ElfError> {
579 for reloc in relocations {
580 let target_addr = base_addr + reloc.offset;
581
582 match reloc.reloc_type {
583 8 => {
585 unsafe {
591 let ptr = target_addr as *mut u64;
592 *ptr = base_addr + reloc.addend as u64;
593 }
594 }
595 1 => {
597 if reloc.symbol as usize >= symbols.len() {
599 return Err(ElfError::InvalidSymbol);
600 }
601 let symbol = &symbols[reloc.symbol as usize];
602 unsafe {
606 let ptr = target_addr as *mut u64;
607 *ptr = symbol.value + reloc.addend as u64;
608 }
609 }
610 6 => {
612 if reloc.symbol as usize >= symbols.len() {
614 return Err(ElfError::InvalidSymbol);
615 }
616 let symbol = &symbols[reloc.symbol as usize];
617 unsafe {
621 let ptr = target_addr as *mut u64;
622 *ptr = symbol.value;
623 }
624 }
625 7 => {
627 if reloc.symbol as usize >= symbols.len() {
629 return Err(ElfError::InvalidSymbol);
630 }
631 let symbol = &symbols[reloc.symbol as usize];
632 unsafe {
637 let ptr = target_addr as *mut u64;
638 *ptr = symbol.value;
639 }
640 }
641 _ => {
642 return Err(ElfError::RelocationFailed);
644 }
645 }
646 }
647
648 Ok(())
649 }
650
651 pub fn resolve_symbols(
653 &self,
654 data: &[u8],
655 symtab_offset: u64,
656 symtab_size: usize,
657 strtab_offset: u64,
658 strtab_size: usize,
659 ) -> Result<Vec<ElfSymbol>, ElfError> {
660 let mut symbols = Vec::new();
661
662 let sym_entry_size = 24; let num_symbols = symtab_size / sym_entry_size;
664
665 for i in 0..num_symbols {
666 let sym_offset = (symtab_offset as usize) + (i * sym_entry_size);
667 if sym_offset + sym_entry_size > data.len() {
668 break;
669 }
670
671 let name_idx = unsafe { *(data[sym_offset..].as_ptr() as *const u32) };
678 let info = data[sym_offset + 4];
679 let other = data[sym_offset + 5];
680 let shndx = unsafe { *(data[sym_offset + 6..].as_ptr() as *const u16) };
681 let value = unsafe { *(data[sym_offset + 8..].as_ptr() as *const u64) };
682 let size = unsafe { *(data[sym_offset + 16..].as_ptr() as *const u64) };
683
684 let name = if (name_idx as usize) < strtab_size {
686 let name_offset = (strtab_offset as usize) + (name_idx as usize);
687 self.read_string(data, name_offset)?
688 } else {
689 String::new()
690 };
691
692 symbols.push(ElfSymbol {
693 name,
694 value,
695 size,
696 info,
697 other,
698 shndx,
699 });
700 }
701
702 Ok(symbols)
703 }
704
705 fn read_string(&self, data: &[u8], offset: usize) -> Result<String, ElfError> {
707 let mut end = offset;
708 while end < data.len() && data[end] != 0 {
709 end += 1;
710 }
711
712 if end > data.len() {
713 return Err(ElfError::InvalidData);
714 }
715
716 String::from_utf8(data[offset..end].to_vec()).map_err(|_| ElfError::InvalidData)
717 }
718
719 fn find_interpreter(
721 &self,
722 data: &[u8],
723 _header: &Elf64Header,
724 program_headers: &[Elf64ProgramHeader],
725 ) -> Result<Option<String>, ElfError> {
726 for ph in program_headers {
727 if ph.p_type == ProgramType::Interp as u32 {
728 let offset = ph.p_offset as usize;
729 let size = ph.p_filesz as usize;
730
731 if offset + size > data.len() {
732 return Err(ElfError::InvalidProgramHeader);
733 }
734
735 let interp_data = &data[offset..offset + size];
736 let interp_str = core::str::from_utf8(&interp_data[..size - 1])
738 .map_err(|_| ElfError::InvalidProgramHeader)?;
739
740 return Ok(Some(String::from(interp_str)));
741 }
742 }
743
744 Ok(None)
745 }
746
    /// Copy one PT_LOAD segment to `base_addr + p_vaddr` and zero its BSS
    /// tail.
    ///
    /// The caller must guarantee that the destination range
    /// [base_addr + p_vaddr, base_addr + p_vaddr + p_memsz) is writable
    /// mapped memory; the writes below go through raw pointers.
    fn load_segment(
        &self,
        data: &[u8],
        ph: &Elf64ProgramHeader,
        base_addr: u64,
    ) -> Result<(), ElfError> {
        let file_offset = ph.p_offset as usize;
        let file_size = ph.p_filesz as usize;
        let mem_size = ph.p_memsz as usize;
        let vaddr = ph.p_vaddr;

        // Reject segments whose file-backed span runs past the buffer.
        if file_offset + file_size > data.len() {
            return Err(ElfError::InvalidProgramHeader);
        }

        let target_addr = (base_addr + vaddr) as *mut u8;

        // Copy the file-backed bytes, if any.
        if file_size > 0 {
            unsafe {
                let src = &data[file_offset..file_offset + file_size];
                let dst = slice::from_raw_parts_mut(target_addr, file_size);
                dst.copy_from_slice(src);
            }
        }

        // Zero the region beyond the file image (p_memsz > p_filesz = BSS).
        if mem_size > file_size {
            unsafe {
                let bss_start = target_addr.add(file_size);
                let bss_size = mem_size - file_size;
                core::ptr::write_bytes(bss_start, 0, bss_size);
            }
        }

        Ok(())
    }
796
    /// Apply dynamic relocations for an image loaded at `base_addr`.
    ///
    /// No-op for statically linked images (no PT_DYNAMIC). The symbol table
    /// size is estimated heuristically (see `estimate_max_symbol_index`)
    /// because DT_SYMTAB carries no explicit length.
    pub fn process_relocations(&self, data: &[u8], base_addr: u64) -> Result<(), ElfError> {
        let header = self.parse_header(data)?;
        let program_headers = self.parse_program_headers(data, &header)?;

        let dynamic_ph = program_headers
            .iter()
            .find(|ph| ph.p_type == ProgramType::Dynamic as u32);

        // Statically linked: nothing to relocate.
        let dynamic_ph = match dynamic_ph {
            Some(ph) => ph,
            None => return Ok(()), };

        let dyn_info =
            self.parse_dynamic_section(data, dynamic_ph.p_offset, dynamic_ph.p_filesz)?;

        // Resolve the dynamic symbol table when both DT_SYMTAB and DT_STRTAB
        // are present; their vaddrs must fall inside a PT_LOAD segment to be
        // translated back into file offsets.
        let symbols = if let (Some(symtab), Some(strtab)) = (dyn_info.symtab, dyn_info.strtab) {
            let symtab_file = self.vaddr_to_file_offset(&program_headers, symtab);
            let strtab_file = self.vaddr_to_file_offset(&program_headers, strtab);

            if let (Some(sym_off), Some(str_off)) = (symtab_file, strtab_file) {
                let sym_size = if dyn_info.syment > 0 {
                    // Estimated entry count; DT_SYMTAB has no explicit size.
                    let max_sym = self.estimate_max_symbol_index(data, &dyn_info);
                    (max_sym + 1) * dyn_info.syment
                } else {
                    // Fallback: assume the symbol table ends where the string
                    // table begins.
                    (str_off as usize).saturating_sub(sym_off as usize)
                };

                // Best-effort: a failed symbol decode degrades to an empty
                // table rather than aborting the load.
                self.resolve_symbols(data, sym_off, sym_size, str_off, dyn_info.strsz)
                    .unwrap_or_default()
            } else {
                Vec::new()
            }
        } else {
            Vec::new()
        };

        let mut relocations = Vec::new();

        // DT_RELA table.
        if let Some(rela_vaddr) = dyn_info.rela {
            if dyn_info.relasz > 0 && dyn_info.relaent > 0 {
                if let Some(rela_off) = self.vaddr_to_file_offset(&program_headers, rela_vaddr) {
                    let count = dyn_info.relasz / dyn_info.relaent;
                    self.parse_rela_entries(data, rela_off as usize, count, &mut relocations)?;
                }
            }
        }

        // PLT relocations (DT_JMPREL); entry size falls back to
        // sizeof(Elf64Rela) when DT_RELAENT is absent.
        if let Some(jmprel_vaddr) = dyn_info.jmprel {
            if dyn_info.pltrelsz > 0 {
                if let Some(jmprel_off) = self.vaddr_to_file_offset(&program_headers, jmprel_vaddr)
                {
                    let entry_size = if dyn_info.relaent > 0 {
                        dyn_info.relaent
                    } else {
                        mem::size_of::<Elf64Rela>()
                    };
                    let count = dyn_info.pltrelsz / entry_size;
                    self.parse_rela_entries(data, jmprel_off as usize, count, &mut relocations)?;
                }
            }
        }

        self.perform_relocations_arch(base_addr, &relocations, &symbols, header.machine)
    }
877
878 fn vaddr_to_file_offset(
880 &self,
881 program_headers: &[Elf64ProgramHeader],
882 vaddr: u64,
883 ) -> Option<u64> {
884 for ph in program_headers {
885 if ph.p_type == ProgramType::Load as u32
886 && vaddr >= ph.p_vaddr
887 && vaddr < ph.p_vaddr + ph.p_filesz
888 {
889 return Some(ph.p_offset + (vaddr - ph.p_vaddr));
890 }
891 }
892 None
893 }
894
895 fn parse_rela_entries(
897 &self,
898 data: &[u8],
899 offset: usize,
900 count: usize,
901 out: &mut Vec<ElfRelocation>,
902 ) -> Result<(), ElfError> {
903 let entry_size = mem::size_of::<Elf64Rela>();
904 for i in 0..count {
905 let pos = offset + i * entry_size;
906 if pos + entry_size > data.len() {
907 break;
908 }
909 let rela = unsafe { *(data[pos..].as_ptr() as *const Elf64Rela) };
912 out.push(ElfRelocation {
913 offset: rela.r_offset,
914 symbol: (rela.r_info >> 32) as u32,
915 reloc_type: (rela.r_info & 0xFFFF_FFFF) as u32,
916 addend: rela.r_addend,
917 });
918 }
919 Ok(())
920 }
921
922 fn estimate_max_symbol_index(&self, data: &[u8], dyn_info: &DynamicInfo) -> usize {
924 let mut max_idx: usize = 0;
925 let _entry_size = mem::size_of::<Elf64Rela>();
926
927 if let Some(_rela) = dyn_info.rela {
929 let count = if dyn_info.relaent > 0 {
930 dyn_info.relasz / dyn_info.relaent
931 } else {
932 0
933 };
934 max_idx = max_idx.max(count);
936 }
937 if let Some(_jmprel) = dyn_info.jmprel {
939 let count = if dyn_info.relaent > 0 {
940 dyn_info.pltrelsz / dyn_info.relaent
941 } else {
942 0
943 };
944 max_idx = max_idx.max(count);
945 }
946 let _ = data; max_idx.max(64) }
949
950 fn perform_relocations_arch(
954 &self,
955 base_addr: u64,
956 relocations: &[ElfRelocation],
957 symbols: &[ElfSymbol],
958 machine: u16,
959 ) -> Result<(), ElfError> {
960 for reloc in relocations {
961 let target_addr = base_addr.wrapping_add(reloc.offset);
962
963 match machine {
964 62 => self.apply_x86_64_reloc(target_addr, reloc, symbols, base_addr)?,
966 183 => self.apply_aarch64_reloc(target_addr, reloc, symbols, base_addr)?,
968 243 => self.apply_riscv_reloc(target_addr, reloc, symbols, base_addr)?,
970 _ => return Err(ElfError::UnsupportedMachine),
971 }
972 }
973 Ok(())
974 }
975
976 fn apply_x86_64_reloc(
978 &self,
979 target_addr: u64,
980 reloc: &ElfRelocation,
981 symbols: &[ElfSymbol],
982 base_addr: u64,
983 ) -> Result<(), ElfError> {
984 match reloc.reloc_type {
985 8 => {
986 unsafe {
989 *(target_addr as *mut u64) = base_addr.wrapping_add(reloc.addend as u64);
990 }
991 }
992 1 => {
993 let sym = self.get_symbol(symbols, reloc.symbol)?;
995 unsafe {
997 *(target_addr as *mut u64) = sym.value.wrapping_add(reloc.addend as u64);
998 }
999 }
1000 6 => {
1001 let sym = self.get_symbol(symbols, reloc.symbol)?;
1003 unsafe {
1005 *(target_addr as *mut u64) = sym.value;
1006 }
1007 }
1008 7 => {
1009 let sym = self.get_symbol(symbols, reloc.symbol)?;
1011 unsafe {
1013 *(target_addr as *mut u64) = sym.value;
1014 }
1015 }
1016 _ => {} }
1018 Ok(())
1019 }
1020
1021 fn apply_aarch64_reloc(
1023 &self,
1024 target_addr: u64,
1025 reloc: &ElfRelocation,
1026 symbols: &[ElfSymbol],
1027 base_addr: u64,
1028 ) -> Result<(), ElfError> {
1029 match reloc.reloc_type {
1030 1027 => {
1031 unsafe {
1034 *(target_addr as *mut u64) = base_addr.wrapping_add(reloc.addend as u64);
1035 }
1036 }
1037 257 => {
1038 let sym = self.get_symbol(symbols, reloc.symbol)?;
1040 unsafe {
1042 *(target_addr as *mut u64) = sym.value.wrapping_add(reloc.addend as u64);
1043 }
1044 }
1045 1025 => {
1046 let sym = self.get_symbol(symbols, reloc.symbol)?;
1048 unsafe {
1050 *(target_addr as *mut u64) = sym.value.wrapping_add(reloc.addend as u64);
1051 }
1052 }
1053 1026 => {
1054 let sym = self.get_symbol(symbols, reloc.symbol)?;
1056 unsafe {
1058 *(target_addr as *mut u64) = sym.value;
1059 }
1060 }
1061 _ => {} }
1063 Ok(())
1064 }
1065
1066 fn apply_riscv_reloc(
1068 &self,
1069 target_addr: u64,
1070 reloc: &ElfRelocation,
1071 symbols: &[ElfSymbol],
1072 base_addr: u64,
1073 ) -> Result<(), ElfError> {
1074 match reloc.reloc_type {
1075 3 => {
1076 unsafe {
1079 *(target_addr as *mut u64) = base_addr.wrapping_add(reloc.addend as u64);
1080 }
1081 }
1082 2 => {
1083 let sym = self.get_symbol(symbols, reloc.symbol)?;
1085 unsafe {
1087 *(target_addr as *mut u64) = sym.value.wrapping_add(reloc.addend as u64);
1088 }
1089 }
1090 5 => {
1091 let sym = self.get_symbol(symbols, reloc.symbol)?;
1093 unsafe {
1095 *(target_addr as *mut u64) = sym.value;
1096 }
1097 }
1098 _ => {} }
1100 Ok(())
1101 }
1102
    /// Fetch symbol `index`, mapping an out-of-range index to
    /// `ElfError::InvalidSymbol`.
    fn get_symbol<'a>(
        &self,
        symbols: &'a [ElfSymbol],
        index: u32,
    ) -> Result<&'a ElfSymbol, ElfError> {
        symbols.get(index as usize).ok_or(ElfError::InvalidSymbol)
    }
1111}
1112
1113pub fn load_elf_from_file(path: &str) -> Result<ElfBinary, ElfError> {
1115 use crate::fs::get_vfs;
1116
1117 let vfs = get_vfs().read();
1119 let node = vfs
1120 .resolve_path(path)
1121 .map_err(|_| ElfError::FileReadFailed)?;
1122
1123 let metadata = node.metadata().map_err(|_| ElfError::FileReadFailed)?;
1125 let size = metadata.size;
1126
1127 let mut buffer = Vec::with_capacity(size);
1129 buffer.resize(size, 0);
1130
1131 node.read(0, &mut buffer)
1132 .map_err(|_| ElfError::FileReadFailed)?;
1133
1134 let loader = ElfLoader::new();
1136 loader.parse(&buffer)
1137}
1138
#[cfg(test)]
mod tests {
    use alloc::vec;

    use super::*;

    /// Build the smallest parseable ELF64 image: a file header immediately
    /// followed by one PT_LOAD program header (flags = RWX, p_offset and
    /// p_filesz left at 0).
    fn make_minimal_elf(
        elf_type: u16,
        machine: u16,
        entry: u64,
        vaddr: u64,
        memsz: u64,
    ) -> Vec<u8> {
        let header_size = core::mem::size_of::<Elf64Header>();
        let ph_size = core::mem::size_of::<Elf64ProgramHeader>();
        let total_size = header_size + ph_size;
        let mut buf = vec![0u8; total_size];

        // e_ident: magic, class = ELF64 (2), data = little-endian (1),
        // version = 1.
        buf[0] = 0x7f;
        buf[1] = b'E';
        buf[2] = b'L';
        buf[3] = b'F';
        buf[4] = 2;
        buf[5] = 1;
        buf[6] = 1;
        // e_type and e_machine, little-endian u16 each.
        buf[16] = (elf_type & 0xFF) as u8;
        buf[17] = ((elf_type >> 8) & 0xFF) as u8;
        buf[18] = (machine & 0xFF) as u8;
        buf[19] = ((machine >> 8) & 0xFF) as u8;
        buf[20] = 1;
        // e_entry.
        let entry_bytes = entry.to_le_bytes();
        buf[24..32].copy_from_slice(&entry_bytes);
        // e_phoff: program headers start right after the file header.
        let phoff = (header_size as u64).to_le_bytes();
        buf[32..40].copy_from_slice(&phoff);
        // e_ehsize, e_phentsize, and e_phnum = 1.
        buf[52] = (header_size & 0xFF) as u8;
        buf[53] = ((header_size >> 8) & 0xFF) as u8;
        buf[54] = (ph_size & 0xFF) as u8;
        buf[55] = ((ph_size >> 8) & 0xFF) as u8;
        buf[56] = 1;
        buf[57] = 0;

        // Program header: p_type = PT_LOAD (1), p_flags = RWX (7),
        // p_vaddr = p_paddr = vaddr, p_memsz = memsz, p_align = 0x1000.
        let ph_offset = header_size;
        buf[ph_offset] = 1;
        buf[ph_offset + 4] = 7;
        let vaddr_bytes = vaddr.to_le_bytes();
        buf[ph_offset + 16..ph_offset + 24].copy_from_slice(&vaddr_bytes);
        buf[ph_offset + 24..ph_offset + 32].copy_from_slice(&vaddr_bytes);
        let memsz_bytes = memsz.to_le_bytes();
        buf[ph_offset + 40..ph_offset + 48].copy_from_slice(&memsz_bytes);
        let align = 0x1000u64.to_le_bytes();
        buf[ph_offset + 48..ph_offset + 56].copy_from_slice(&align);

        buf
    }

    #[test]
    fn test_elf_loader_new() {
        let loader = ElfLoader::new();
        let _ = loader;
    }

    #[test]
    fn test_elf_loader_default() {
        let loader = ElfLoader::default();
        let _ = loader;
    }

    // A zeroed buffer has no ELF magic at all.
    #[test]
    fn test_parse_invalid_magic() {
        let loader = ElfLoader::new();
        let data = vec![0u8; 128]; let result = loader.parse(&data);
        assert!(result.is_err());
        assert!(matches!(result.unwrap_err(), ElfError::InvalidMagic));
    }

    // Magic alone is shorter than an Elf64Header.
    #[test]
    fn test_parse_too_small() {
        let loader = ElfLoader::new();
        let data = vec![0x7f, b'E', b'L', b'F']; let result = loader.parse(&data);
        assert!(result.is_err());
    }

    // class = 1 marks a 32-bit image, which this loader rejects.
    #[test]
    fn test_parse_wrong_class_32bit() {
        let loader = ElfLoader::new();
        let mut data = vec![0u8; 128];
        data[0] = 0x7f;
        data[1] = b'E';
        data[2] = b'L';
        data[3] = b'F';
        data[4] = 1;
        data[5] = 1; data[6] = 1; let result = loader.parse(&data);
        assert!(result.is_err());
        assert!(matches!(result.unwrap_err(), ElfError::InvalidClass));
    }

    // data = 2 marks big-endian, which this loader rejects.
    #[test]
    fn test_parse_wrong_endian() {
        let loader = ElfLoader::new();
        let mut data = vec![0u8; 128];
        data[0] = 0x7f;
        data[1] = b'E';
        data[2] = b'L';
        data[3] = b'F';
        data[4] = 2; data[5] = 2; data[6] = 1;

        let result = loader.parse(&data);
        assert!(result.is_err());
        assert!(matches!(result.unwrap_err(), ElfError::InvalidData));
    }

    // e_type stays 0 (ET_NONE); machine is valid x86_64 (62).
    #[test]
    fn test_parse_invalid_type_none() {
        let loader = ElfLoader::new();
        let mut data = vec![0u8; 128];
        data[0..4].copy_from_slice(&ELF_MAGIC);
        data[4] = 2; data[5] = 1; data[6] = 1;
        data[18] = 62; let result = loader.parse(&data);
        assert!(result.is_err());
        assert!(matches!(result.unwrap_err(), ElfError::InvalidType));
    }

    // Valid ET_EXEC type but unknown machine id 99.
    #[test]
    fn test_parse_unsupported_machine() {
        let loader = ElfLoader::new();
        let mut data = vec![0u8; 128];
        data[0..4].copy_from_slice(&ELF_MAGIC);
        data[4] = 2;
        data[5] = 1;
        data[6] = 1;
        data[16] = 2; data[18] = 99;

        let result = loader.parse(&data);
        assert!(result.is_err());
        assert!(matches!(result.unwrap_err(), ElfError::UnsupportedMachine));
    }

    #[test]
    fn test_parse_valid_executable_x86_64() {
        let loader = ElfLoader::new();
        let data = make_minimal_elf(2, 62, 0x401000, 0x400000, 0x1000);

        let result = loader.parse(&data);
        assert!(result.is_ok());

        let binary = result.unwrap();
        assert_eq!(binary.entry_point, 0x401000);
        assert_eq!(binary.load_base, 0x400000);
        assert_eq!(binary.load_size, 0x1000);
        assert!(!binary.dynamic);
        assert!(binary.interpreter.is_none());
    }

    #[test]
    fn test_parse_valid_executable_aarch64() {
        let loader = ElfLoader::new();
        let data = make_minimal_elf(2, 183, 0x400000, 0x400000, 0x2000);

        let result = loader.parse(&data);
        assert!(result.is_ok());
        let binary = result.unwrap();
        assert_eq!(binary.entry_point, 0x400000);
    }

    #[test]
    fn test_parse_valid_executable_riscv() {
        let loader = ElfLoader::new();
        let data = make_minimal_elf(2, 243, 0x10000, 0x10000, 0x4000);

        let result = loader.parse(&data);
        assert!(result.is_ok());
        let binary = result.unwrap();
        assert_eq!(binary.entry_point, 0x10000);
    }

    // ET_DYN (3) is also accepted by validate_header.
    #[test]
    fn test_parse_shared_object() {
        let loader = ElfLoader::new();
        let data = make_minimal_elf(3, 62, 0x0, 0x0, 0x1000);

        let result = loader.parse(&data);
        assert!(result.is_ok());
    }

    #[test]
    fn test_parse_segments_load() {
        let loader = ElfLoader::new();
        let data = make_minimal_elf(2, 62, 0x401000, 0x400000, 0x3000);

        let binary = loader.parse(&data).unwrap();
        assert_eq!(binary.segments.len(), 1);

        let seg = &binary.segments[0];
        assert_eq!(seg.segment_type, SegmentType::Load);
        assert_eq!(seg.virtual_addr, 0x400000);
        assert_eq!(seg.memory_size, 0x3000);
    }

    // No PT_LOAD segments at all -> layout computation must fail.
    #[test]
    fn test_calculate_memory_layout_empty() {
        let loader = ElfLoader::new();
        let headers: Vec<Elf64ProgramHeader> = vec![];

        let result = loader.calculate_memory_layout(&headers);
        assert!(result.is_err());
    }

    #[test]
    fn test_calculate_memory_layout_single_load() {
        let loader = ElfLoader::new();
        let headers = vec![Elf64ProgramHeader {
            p_type: 1, p_flags: 5,
            p_offset: 0,
            p_vaddr: 0x400000,
            p_paddr: 0x400000,
            p_filesz: 0x1000,
            p_memsz: 0x2000,
            p_align: 0x1000,
        }];

        let (base, size) = loader.calculate_memory_layout(&headers).unwrap();
        assert_eq!(base, 0x400000);
        assert_eq!(size, 0x2000);
    }

    // The span covers from the lowest start to the highest end address.
    #[test]
    fn test_calculate_memory_layout_multiple_loads() {
        let loader = ElfLoader::new();
        let headers = vec![
            Elf64ProgramHeader {
                p_type: 1, p_flags: 5,
                p_offset: 0,
                p_vaddr: 0x400000,
                p_paddr: 0x400000,
                p_filesz: 0x1000,
                p_memsz: 0x1000,
                p_align: 0x1000,
            },
            Elf64ProgramHeader {
                p_type: 1, p_flags: 6,
                p_offset: 0x1000,
                p_vaddr: 0x600000,
                p_paddr: 0x600000,
                p_filesz: 0x500,
                p_memsz: 0x3000,
                p_align: 0x1000,
            },
        ];

        let (base, size) = loader.calculate_memory_layout(&headers).unwrap();
        assert_eq!(base, 0x400000);
        assert_eq!(size, (0x600000 + 0x3000 - 0x400000));
    }

    #[test]
    fn test_read_string_valid() {
        let loader = ElfLoader::new();
        let data = b"hello\0world\0";
        let result = loader.read_string(data, 0);
        assert!(result.is_ok());
        assert_eq!(result.unwrap(), "hello");
    }

    #[test]
    fn test_read_string_at_offset() {
        let loader = ElfLoader::new();
        let data = b"hello\0world\0";
        let result = loader.read_string(data, 6);
        assert!(result.is_ok());
        assert_eq!(result.unwrap(), "world");
    }

    // A leading NUL yields the empty string.
    #[test]
    fn test_read_string_empty() {
        let loader = ElfLoader::new();
        let data = b"\0rest";
        let result = loader.read_string(data, 0);
        assert!(result.is_ok());
        assert_eq!(result.unwrap(), "");
    }

    #[test]
    fn test_segment_type_equality() {
        assert_eq!(SegmentType::Null, SegmentType::Null);
        assert_eq!(SegmentType::Load, SegmentType::Load);
        assert_ne!(SegmentType::Load, SegmentType::Dynamic);
        assert_eq!(SegmentType::Other(99), SegmentType::Other(99));
        assert_ne!(SegmentType::Other(1), SegmentType::Other(2));
    }
}
1488
/// Load the ELF at `path` into the current process's address space and
/// return its entry point, applying relocations for dynamic images.
pub fn exec_elf(path: &str) -> Result<u64, ElfError> {
    use crate::process::ProcessId;

    let elf_info = load_elf_from_file(path)?;

    // NOTE(review): falls back to PID 1 when no current process exists —
    // confirm this is the intended bootstrap behavior.
    let current_pid = crate::process::current_process()
        .map(|p| p.pid)
        .unwrap_or(ProcessId(1));
    let process =
        crate::process::get_process(current_pid).ok_or(ElfError::MemoryAllocationFailed)?;

    // Fixed load base; the memory-space lock is held for the remainder of
    // the function, including the VFS read below.
    let base_addr = 0x400000; let mut memory_space = process.memory_space.lock();
    let vas = &mut *memory_space;

    // Map one writable user page per 4 KiB of the image's memory span.
    for offset in (0..elf_info.load_size).step_by(4096) {
        vas.map_page(
            (base_addr + offset) as usize,
            crate::mm::PageFlags::USER | crate::mm::PageFlags::WRITABLE,
        )
        .map_err(|_| ElfError::MemoryAllocationFailed)?;
    }

    // Re-read the file into a buffer sized to the in-memory span.
    // NOTE(review): `load_into_memory` below writes directly to
    // base_addr + p_vaddr in the *current* mapping rather than through the
    // VAS pages mapped above — verify both refer to the same memory.
    let vfs = get_vfs().read();
    let node = vfs
        .resolve_path(path)
        .map_err(|_| ElfError::FileReadFailed)?;

    let mut buffer = Vec::with_capacity(elf_info.load_size);
    buffer.resize(elf_info.load_size, 0);

    node.read(0, &mut buffer)
        .map_err(|_| ElfError::FileReadFailed)?;

    let loader = ElfLoader::new();
    let entry_point = loader.load_into_memory(&buffer, base_addr as u64)?;

    if elf_info.dynamic {
        loader.process_relocations(&buffer, base_addr as u64)?;
    }

    Ok(entry_point)
}
1538}