use crate::environment::PAGE_SIZE;
use crate::fs::{File, SeekFrom};
use crate::mem::page::{allocate_raw_pages, free_raw_pages};
use crate::task::Task;
use crate::vm::vmem::{MemoryArea, VirtualMemoryMap, VirtualMemoryPermission, VirtualMemoryRegion};
use alloc::boxed::Box;
use alloc::string::{String, ToString};
use alloc::{format, vec};

use super::{ManagedPage, TaskType};
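// ELF-64 constants used by the loader (identification bytes, segment type,
// and segment flag bits), with values as defined by the ELF-64 specification.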
const ELFMAG: [u8; 4] = [0x7F, b'E', b'L', b'F'];
const ELFCLASS64: u8 = 2;
const ELFDATA2LSB: u8 = 1;
const PT_LOAD: u32 = 1;

pub const PF_X: u32 = 1;
pub const PF_W: u32 = 2;
pub const PF_R: u32 = 4;

const EI_MAG0: usize = 0;
const EI_MAG1: usize = 1;
const EI_MAG2: usize = 2;
const EI_MAG3: usize = 3;
const EI_CLASS: usize = 4;
const EI_DATA: usize = 5;
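
/// Reads a `u16` from `buffer` at `offset` in the indicated byte order.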
fn read_u16(buffer: &[u8], offset: usize, is_little_endian: bool) -> u16 {
    let bytes = buffer[offset..offset + 2].try_into().unwrap();
    if is_little_endian {
        u16::from_le_bytes(bytes)
    } else {
        u16::from_be_bytes(bytes)
    }
}

/// Reads a `u32` from `buffer` at `offset` in the indicated byte order.
fn read_u32(buffer: &[u8], offset: usize, is_little_endian: bool) -> u32 {
    let bytes = buffer[offset..offset + 4].try_into().unwrap();
    if is_little_endian {
        u32::from_le_bytes(bytes)
    } else {
        u32::from_be_bytes(bytes)
    }
}

/// Reads a `u64` from `buffer` at `offset` in the indicated byte order.
fn read_u64(buffer: &[u8], offset: usize, is_little_endian: bool) -> u64 {
    let bytes = buffer[offset..offset + 8].try_into().unwrap();
    if is_little_endian {
        u64::from_le_bytes(bytes)
    } else {
        u64::from_be_bytes(bytes)
    }
}
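/// Parsed ELF-64 file header (`Ehdr`).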
#[derive(Debug)]
pub struct ElfHeader {
    pub ei_class: u8,
    pub ei_data: u8,
    pub e_type: u16,
    pub e_machine: u16,
    pub e_version: u32,
    pub e_entry: u64,
    pub e_phoff: u64,
    pub e_shoff: u64,
    pub e_flags: u32,
    pub e_ehsize: u16,
    pub e_phentsize: u16,
    pub e_phnum: u16,
    pub e_shentsize: u16,
    pub e_shnum: u16,
    pub e_shstrndx: u16,
}
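
/// Parsed ELF-64 program header (`Phdr`) describing one segment.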
#[derive(Debug)]
pub struct ProgramHeader {
    pub p_type: u32,
    pub p_flags: u32,
    pub p_offset: u64,
    pub p_vaddr: u64,
    pub p_paddr: u64,
    pub p_filesz: u64,
    pub p_memsz: u64,
    pub p_align: u64,
}

#[derive(Debug)]
pub enum ElfHeaderParseErrorKind {
    InvalidMagicNumber,
    UnsupportedClass,
    InvalidData,
    Other(String),
}

#[derive(Debug)]
pub struct ElfHeaderParseError {
    pub kind: ElfHeaderParseErrorKind,
    pub message: String,
}

#[derive(Debug)]
pub enum ProgramHeaderParseErrorKind {
    InvalidSize,
    Other(String),
}

#[derive(Debug)]
pub struct ProgramHeaderParseError {
    pub kind: ProgramHeaderParseErrorKind,
    pub message: String,
}

#[derive(Debug)]
pub struct ElfLoaderError {
    pub message: String,
}

impl ElfHeader {
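    /// Parses the fixed 64-byte ELF-64 header from `buffer`.
    ///
    /// Validates the magic number and ELF class, then decodes the remaining
    /// fields using the byte order indicated by `e_ident[EI_DATA]`.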
    pub fn parse(buffer: &[u8]) -> Result<Self, ElfHeaderParseError> {
        if buffer.len() < 64 {
            return Err(ElfHeaderParseError {
                kind: ElfHeaderParseErrorKind::InvalidData,
                message: "ELF header too small".to_string(),
            });
        }

        if buffer[EI_MAG0] != ELFMAG[0] || buffer[EI_MAG1] != ELFMAG[1] ||
            buffer[EI_MAG2] != ELFMAG[2] || buffer[EI_MAG3] != ELFMAG[3] {
            return Err(ElfHeaderParseError {
                kind: ElfHeaderParseErrorKind::InvalidMagicNumber,
                message: "Invalid ELF magic number".to_string(),
            });
        }

        let ei_class = buffer[EI_CLASS];
        if ei_class != ELFCLASS64 {
            return Err(ElfHeaderParseError {
                kind: ElfHeaderParseErrorKind::UnsupportedClass,
                message: "Only 64-bit ELF is supported".to_string(),
            });
        }

        let ei_data = buffer[EI_DATA];
        let is_little_endian = ei_data == ELFDATA2LSB;
        let e_type = read_u16(buffer, 16, is_little_endian);
        let e_machine = read_u16(buffer, 18, is_little_endian);
        let e_version = read_u32(buffer, 20, is_little_endian);
        let e_entry = read_u64(buffer, 24, is_little_endian);
        let e_phoff = read_u64(buffer, 32, is_little_endian);
        let e_shoff = read_u64(buffer, 40, is_little_endian);
        let e_flags = read_u32(buffer, 48, is_little_endian);
        let e_ehsize = read_u16(buffer, 52, is_little_endian);
        let e_phentsize = read_u16(buffer, 54, is_little_endian);
        let e_phnum = read_u16(buffer, 56, is_little_endian);
        let e_shentsize = read_u16(buffer, 58, is_little_endian);
        let e_shnum = read_u16(buffer, 60, is_little_endian);
        let e_shstrndx = read_u16(buffer, 62, is_little_endian);

        Ok(Self {
            ei_class,
            ei_data,
            e_type,
            e_machine,
            e_version,
            e_entry,
            e_phoff,
            e_shoff,
            e_flags,
            e_ehsize,
            e_phentsize,
            e_phnum,
            e_shentsize,
            e_shnum,
            e_shstrndx,
        })
    }
}

impl ProgramHeader {
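    /// Parses one ELF-64 program header entry (at least 56 bytes).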
    pub fn parse(buffer: &[u8], is_little_endian: bool) -> Result<Self, ProgramHeaderParseError> {
        if buffer.len() < 56 {
            return Err(ProgramHeaderParseError {
                kind: ProgramHeaderParseErrorKind::InvalidSize,
                message: "Program header too small".to_string(),
            });
        }

        let p_type = read_u32(buffer, 0, is_little_endian);
        let p_flags = read_u32(buffer, 4, is_little_endian);
        let p_offset = read_u64(buffer, 8, is_little_endian);
        let p_vaddr = read_u64(buffer, 16, is_little_endian);
        let p_paddr = read_u64(buffer, 24, is_little_endian);
        let p_filesz = read_u64(buffer, 32, is_little_endian);
        let p_memsz = read_u64(buffer, 40, is_little_endian);
        let p_align = read_u64(buffer, 48, is_little_endian);

        Ok(Self {
            p_type,
            p_flags,
            p_offset,
            p_vaddr,
            p_paddr,
            p_filesz,
            p_memsz,
            p_align,
        })
    }
}
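/// Record of a loaded segment: its virtual address, mapped size, and ELF flags.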
#[derive(Debug)]
pub struct LoadedSegment {
    pub vaddr: u64,
    pub size: u64,
    pub flags: u32,
}
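
/// Loads an ELF-64 executable from `file` into `task`'s address space and
/// returns the entry point (`e_entry`) on success.
///
/// For each `PT_LOAD` segment this maps page-aligned memory into the task,
/// copies the file-backed bytes, and zero-fills the remainder (the BSS).
///
/// A minimal call-site sketch, assuming the surrounding kernel provides a way
/// to open the executable and set the task's entry point (`open_executable`
/// and `entry_point` are hypothetical here):
///
/// ```ignore
/// let mut file = open_executable("/bin/init")?;   // hypothetical helper
/// let entry = load_elf_into_task(&mut file, &mut task)
///     .map_err(|e| e.message)?;
/// task.entry_point = entry as usize;              // hypothetical field
/// ```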
pub fn load_elf_into_task(file: &mut File, task: &mut Task) -> Result<u64, ElfLoaderError> {
    file.seek(SeekFrom::Start(0)).map_err(|e| ElfLoaderError {
        message: format!("Failed to seek to start of file: {:?}", e),
    })?;

    // Read and parse the fixed 64-byte ELF header.
    let mut header_buffer = vec![0u8; 64];
    file.read(&mut header_buffer).map_err(|e| ElfLoaderError {
        message: format!("Failed to read ELF header: {:?}", e),
    })?;

    let header = match ElfHeader::parse(&header_buffer) {
        Ok(header) => header,
        Err(e) => return Err(ElfLoaderError {
            message: format!("Failed to parse ELF header: {:?}", e),
        }),
    };
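
    // Walk the program header table; entry `i` lives at
    // e_phoff + i * e_phentsize in the file.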
    for i in 0..header.e_phnum {
        let offset = header.e_phoff + (i as u64) * (header.e_phentsize as u64);
        file.seek(SeekFrom::Start(offset)).map_err(|e| ElfLoaderError {
            message: format!("Failed to seek to program header: {:?}", e),
        })?;

        let mut ph_buffer = vec![0u8; header.e_phentsize as usize];
        file.read(&mut ph_buffer).map_err(|e| ElfLoaderError {
            message: format!("Failed to read program header: {:?}", e),
        })?;

        let ph = match ProgramHeader::parse(&ph_buffer, header.ei_data == ELFDATA2LSB) {
            Ok(ph) => ph,
            Err(e) => return Err(ElfLoaderError {
                message: format!("Failed to parse program header: {:?}", e),
            }),
        };
        if ph.p_type == PT_LOAD {
            if ph.p_vaddr % PAGE_SIZE as u64 != 0 {
                return Err(ElfLoaderError {
                    message: format!("Segment virtual address is not aligned: {:#x}", ph.p_vaddr),
                });
            }

            // Round the in-memory size up to a whole number of pages.
            let aligned_size = if ph.p_memsz % PAGE_SIZE as u64 != 0 {
                (ph.p_memsz / PAGE_SIZE as u64 + 1) * PAGE_SIZE as u64
            } else {
                ph.p_memsz
            };

            map_elf_segment(task, ph.p_vaddr as usize, aligned_size as usize, ph.p_align as usize, ph.p_flags)
                .map_err(|e| ElfLoaderError {
                    message: format!("Failed to map ELF segment: {:?}", e),
                })?;

            // Classify the segment from its flags for the task's size accounting.
            let segment_type = if ph.p_flags & PF_X != 0 {
                VirtualMemoryRegion::Text
            } else if ph.p_flags & PF_W != 0 || ph.p_flags & PF_R != 0 {
                VirtualMemoryRegion::Data
            } else {
                VirtualMemoryRegion::Unknown
            };

            match segment_type {
                VirtualMemoryRegion::Text => {
                    task.text_size += aligned_size as usize;
                },
                VirtualMemoryRegion::Data => {
                    task.data_size += aligned_size as usize;
                },
                _ => {
                    return Err(ElfLoaderError {
                        message: format!("Unknown segment flags: {:#x}", ph.p_flags),
                    });
                }
            }

            // Copy the file-backed part of the segment into the mapped pages.
            let mut segment_data = vec![0u8; ph.p_filesz as usize];
            file.seek(SeekFrom::Start(ph.p_offset)).map_err(|e| ElfLoaderError {
                message: format!("Failed to seek to segment data: {:?}", e),
            })?;
            file.read(&mut segment_data).map_err(|e| ElfLoaderError {
                message: format!("Failed to read segment data: {:?}", e),
            })?;

            let vaddr = ph.p_vaddr as usize;
            match task.vm_manager.translate_vaddr(vaddr) {
                Some(paddr) => {
                    unsafe {
                        core::ptr::copy_nonoverlapping(
                            segment_data.as_ptr(),
                            paddr as *mut u8,
                            ph.p_filesz as usize,
                        );
                    }

                    // Zero-fill the tail (the BSS) when p_memsz exceeds p_filesz.
                    if ph.p_memsz > ph.p_filesz {
                        let zero_start = paddr + ph.p_filesz as usize;
                        let zero_size = ph.p_memsz as usize - ph.p_filesz as usize;
                        unsafe {
                            core::ptr::write_bytes(zero_start as *mut u8, 0, zero_size);
                        }
                    }
                },
                None => return Err(ElfLoaderError {
                    message: format!("Failed to translate virtual address: {:#x} for segment at offset {:#x}", vaddr, ph.p_offset),
                }),
            }
        }
    }

    Ok(header.e_entry)
}
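
/// Allocates physical pages for one ELF segment and maps them into `task`'s
/// address space at `vaddr`, with permissions derived from the ELF `p_flags`.
/// On mapping failure, the freshly allocated pages are freed again.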
fn map_elf_segment(task: &mut Task, vaddr: usize, size: usize, align: usize, flags: u32) -> Result<(), &'static str> {
    if align == 0 {
        return Err("Alignment must be greater than zero");
    }
    if size == 0 || size % align != 0 {
        return Err("Invalid size");
    }
    if vaddr % align != 0 {
        return Err("Address is not aligned");
    }

    // Translate ELF segment flags into virtual memory permissions.
    let mut permissions = 0;
    if flags & PF_R != 0 {
        permissions |= VirtualMemoryPermission::Read as usize;
    }
    if flags & PF_W != 0 {
        permissions |= VirtualMemoryPermission::Write as usize;
    }
    if flags & PF_X != 0 {
        permissions |= VirtualMemoryPermission::Execute as usize;
    }
    if task.task_type == TaskType::User {
        permissions |= VirtualMemoryPermission::User as usize;
    }

    let vmarea = MemoryArea {
        start: vaddr,
        end: vaddr + size - 1,
    };

    // Refuse to map over an existing mapping at this address.
    if task.vm_manager.search_memory_map(vaddr).is_some() {
        return Err("Memory area overlaps with existing mapping");
    }

    let num_of_pages = (size + PAGE_SIZE - 1) / PAGE_SIZE;
    let pages = allocate_raw_pages(num_of_pages);
    let ptr = pages as *mut u8;
    if ptr.is_null() {
        return Err("Failed to allocate memory");
    }
    let pmarea = MemoryArea {
        start: ptr as usize,
        end: (ptr as usize) + size - 1,
    };

    let map = VirtualMemoryMap {
        vmarea,
        pmarea,
        permissions,
        is_shared: false,
    };

    if let Err(e) = task.vm_manager.add_memory_map(map) {
        free_raw_pages(pages, num_of_pages);
        return Err(e);
    }

    // Hand each backing page to the task so it is freed when the task exits.
    for i in 0..num_of_pages {
        task.add_managed_page(ManagedPage {
            vaddr: vaddr + i * PAGE_SIZE,
            page: unsafe { Box::from_raw(pages.wrapping_add(i)) },
        });
    }

    Ok(())
}

#[cfg(test)]
mod tests;