//! Task management.
//!
//! Defines the [`Task`] structure and its lifecycle: creation, address-space
//! setup, program-break management, fork-style cloning, exit, and wait.

pub mod syscall;
pub mod elf_loader;

extern crate alloc;

use alloc::{boxed::Box, string::{String, ToString}, sync::Arc, vec::Vec};
use spin::Mutex;

use crate::{
    arch::{get_cpu, vcpu::Vcpu},
    environment::{
        DEAFAULT_MAX_TASK_DATA_SIZE, DEAFAULT_MAX_TASK_STACK_SIZE,
        DEAFAULT_MAX_TASK_TEXT_SIZE, KERNEL_VM_STACK_END, PAGE_SIZE,
    },
    fs::{File, VfsManager},
    mem::page::{allocate_raw_pages, free_boxed_page, Page},
    sched::scheduler::get_scheduler,
    vm::{
        manager::VirtualMemoryManager, user_kernel_vm_init, user_vm_init,
        vmem::{MemoryArea, VirtualMemoryMap, VirtualMemoryRegion},
    },
};
use crate::abi::{scarlet::ScarletAbi, AbiModule};
/// Maximum number of file descriptors per task.
const NUM_OF_FDS: usize = 256;

#[derive(Debug, PartialEq, Clone, Copy)]
pub enum TaskState {
    NotInitialized,
    Ready,
    Running,
    Blocked,
    Zombie,
    Terminated,
}

#[derive(Debug, PartialEq, Clone, Copy)]
pub enum TaskType {
    Kernel,
    User,
}

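/// A schedulable unit of execution, together with its address space,
/// file descriptor table, and parent/child bookkeeping.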
pub struct Task {
    id: usize,
    pub name: String,
    pub priority: u32,
    pub vcpu: Vcpu,
    pub state: TaskState,
    pub task_type: TaskType,
    pub entry: usize,
    pub brk: Option<usize>,
    pub stack_size: usize,
    pub data_size: usize,
    pub text_size: usize,
    pub max_stack_size: usize,
    pub max_data_size: usize,
    pub max_text_size: usize,
    pub vm_manager: VirtualMemoryManager,
    pub managed_pages: Vec<ManagedPage>,
    parent_id: Option<usize>,
    children: Vec<usize>,
    exit_status: Option<i32>,
    pub abi: Option<Box<dyn AbiModule>>,

    /// Stack of free file descriptor numbers.
    fd_table: Vec<usize>,
    files: [Option<File>; NUM_OF_FDS],
    pub cwd: Option<String>,

    pub vfs: Option<Arc<VfsManager>>,
}

#[derive(Debug, Clone)]
pub struct ManagedPage {
    pub vaddr: usize,
    pub page: Box<Page>,
}

/// Monotonically increasing counter used to hand out unique task IDs.
static TASK_ID: Mutex<usize> = Mutex::new(1);

impl Task {
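    /// Creates a new task with the given name, priority, and type.
    ///
    /// The task starts in [`TaskState::NotInitialized`] with the default
    /// Scarlet ABI and a full table of free file descriptors; call
    /// [`Task::init`] before scheduling it.
    ///
    /// Illustrative usage (a minimal sketch; assumes a kernel context):
    ///
    /// ```ignore
    /// let mut task = Task::new("shell".to_string(), 0, TaskType::User);
    /// task.init();
    /// assert_eq!(task.get_state(), TaskState::Ready);
    /// ```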
    pub fn new(name: String, priority: u32, task_type: TaskType) -> Self {
        let mut taskid = TASK_ID.lock();
        let mut task = Task {
            id: *taskid,
            name,
            priority,
            vcpu: Vcpu::new(match task_type {
                TaskType::Kernel => crate::arch::vcpu::Mode::Kernel,
                TaskType::User => crate::arch::vcpu::Mode::User,
            }),
            state: TaskState::NotInitialized,
            task_type,
            entry: 0,
            brk: None,
            stack_size: 0,
            data_size: 0,
            text_size: 0,
            max_stack_size: DEAFAULT_MAX_TASK_STACK_SIZE,
            max_data_size: DEAFAULT_MAX_TASK_DATA_SIZE,
            max_text_size: DEAFAULT_MAX_TASK_TEXT_SIZE,
            vm_manager: VirtualMemoryManager::new(),
            managed_pages: Vec::new(),
            parent_id: None,
            children: Vec::new(),
            exit_status: None,
            abi: Some(Box::new(ScarletAbi::default())),
            fd_table: Vec::new(),
            files: [const { None }; NUM_OF_FDS],
            cwd: None,
            vfs: None,
        };

        // Fill the free-descriptor stack in reverse so that pop() hands out
        // the lowest-numbered descriptors first.
        for i in (0..NUM_OF_FDS).rev() {
            task.fd_table.push(i);
        }

        *taskid += 1;
        task
    }

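    /// Initializes the task's address space and stack pointer according to
    /// its type, then marks it [`TaskState::Ready`].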
    pub fn init(&mut self) {
        match self.task_type {
            TaskType::Kernel => {
                user_kernel_vm_init(self);
                self.vcpu.set_sp(KERNEL_VM_STACK_END + 1);
            },
            TaskType::User => {
                user_vm_init(self);
                // Stack pointer starts at the top page of the user address space.
                self.vcpu.set_sp(0xffff_ffff_ffff_f000);
            }
        }

        self.state = TaskState::Ready;
    }

    pub fn get_id(&self) -> usize {
        self.id
    }

    pub fn set_state(&mut self, state: TaskState) {
        self.state = state;
    }

    pub fn get_state(&self) -> TaskState {
        self.state
    }

    /// Total size in bytes of the task's text, data, and stack segments.
    pub fn get_size(&self) -> usize {
        self.stack_size + self.text_size + self.data_size
    }

    /// Returns the current program break, defaulting to the end of the
    /// text and data segments if it has not been set yet.
    pub fn get_brk(&self) -> usize {
        self.brk.unwrap_or(self.text_size + self.data_size)
    }

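    /// Moves the program break to `brk`, allocating or freeing whole data
    /// pages as needed. Fails if `brk` would fall below the text segment or
    /// if backing pages cannot be allocated.
    ///
    /// Illustrative usage (a sketch; assumes an initialized user task):
    ///
    /// ```ignore
    /// task.set_brk(task.get_brk() + 0x1000)?; // grow the heap by one page
    /// ```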
    pub fn set_brk(&mut self, brk: usize) -> Result<(), &'static str> {
        if brk < self.text_size {
            return Err("Invalid address");
        }
        let prev_brk = self.get_brk();
        if brk < prev_brk {
            // Shrinking: free the whole pages between the new and old break.
            let prev_addr = (prev_brk + PAGE_SIZE - 1) & !(PAGE_SIZE - 1);
            let addr = (brk + PAGE_SIZE - 1) & !(PAGE_SIZE - 1);
            let num_of_pages = (prev_addr - addr) / PAGE_SIZE;
            self.free_data_pages(addr, num_of_pages);
        } else if brk > prev_brk {
            // Growing: allocate the pages between the old and new break,
            // unless that range is already mapped.
            let prev_addr = (prev_brk + PAGE_SIZE - 1) & !(PAGE_SIZE - 1);
            let addr = (brk + PAGE_SIZE - 1) & !(PAGE_SIZE - 1);
            let num_of_pages = (addr - prev_addr) / PAGE_SIZE;

            if num_of_pages > 0 {
                match self.vm_manager.search_memory_map(prev_addr) {
                    Some(_) => {},
                    None => {
                        match self.allocate_data_pages(prev_addr, num_of_pages) {
                            Ok(_) => {},
                            Err(_) => return Err("Failed to allocate pages"),
                        }
                    },
                }
            }
        }
        self.brk = Some(brk);
        Ok(())
    }

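    /// Allocates `num_of_pages` physical pages, maps them at `vaddr` with the
    /// given permissions, and tracks them as managed pages so they are freed
    /// with the task. `vaddr` must be page aligned.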
    pub fn allocate_pages(&mut self, vaddr: usize, num_of_pages: usize, permissions: usize) -> Result<VirtualMemoryMap, &'static str> {
        if vaddr % PAGE_SIZE != 0 {
            return Err("Address is not page aligned");
        }

        let pages = allocate_raw_pages(num_of_pages);
        let size = num_of_pages * PAGE_SIZE;
        let paddr = pages as usize;
        let mmap = VirtualMemoryMap {
            pmarea: MemoryArea {
                start: paddr,
                end: paddr + size - 1,
            },
            vmarea: MemoryArea {
                start: vaddr,
                end: vaddr + size - 1,
            },
            permissions,
            is_shared: false,
        };
        self.vm_manager.add_memory_map(mmap).map_err(|e| panic!("Failed to add memory map: {}", e))?;

        // Hand ownership of each raw page to the task so it is freed on exit.
        for i in 0..num_of_pages {
            let page = unsafe { Box::from_raw(pages.wrapping_add(i)) };
            let vaddr = mmap.vmarea.start + i * PAGE_SIZE;
            self.add_managed_page(ManagedPage {
                vaddr,
                page
            });
        }

        Ok(mmap)
    }

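    /// Unmaps and frees `num_of_pages` pages starting at `vaddr`, splitting
    /// any memory map that only partially overlaps the freed range.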
    pub fn free_pages(&mut self, vaddr: usize, num_of_pages: usize) {
        let page = vaddr / PAGE_SIZE;
        for p in 0..num_of_pages {
            let vaddr = (page + p) * PAGE_SIZE;
            match self.vm_manager.search_memory_map_idx(vaddr) {
                Some(idx) => {
                    let mmap = self.vm_manager.remove_memory_map(idx).unwrap();
                    // Keep the part of the map that precedes the freed range.
                    if p == 0 && mmap.vmarea.start < vaddr {
                        let size = vaddr - mmap.vmarea.start;
                        let paddr = mmap.pmarea.start;
                        let mmap1 = VirtualMemoryMap {
                            pmarea: MemoryArea {
                                start: paddr,
                                end: paddr + size - 1,
                            },
                            vmarea: MemoryArea {
                                start: mmap.vmarea.start,
                                end: vaddr - 1,
                            },
                            permissions: mmap.permissions,
                            is_shared: mmap.is_shared,
                        };
                        self.vm_manager.add_memory_map(mmap1)
                            .map_err(|e| panic!("Failed to add memory map: {}", e)).unwrap();
                    }
                    // Keep the part of the map that follows the freed range.
                    if p == num_of_pages - 1 && mmap.vmarea.end > vaddr + PAGE_SIZE - 1 {
                        let size = mmap.vmarea.end - (vaddr + PAGE_SIZE) + 1;
                        let paddr = mmap.pmarea.start + (vaddr + PAGE_SIZE - mmap.vmarea.start);
                        let mmap2 = VirtualMemoryMap {
                            pmarea: MemoryArea {
                                start: paddr,
                                end: paddr + size - 1,
                            },
                            vmarea: MemoryArea {
                                start: vaddr + PAGE_SIZE,
                                end: mmap.vmarea.end,
                            },
                            permissions: mmap.permissions,
                            is_shared: mmap.is_shared,
                        };
                        self.vm_manager.add_memory_map(mmap2)
                            .map_err(|e| panic!("Failed to add memory map: {}", e)).unwrap();
                    }
                    // Release the backing physical page if this task owns it.
                    if let Some(free_page) = self.remove_managed_page(vaddr) {
                        free_boxed_page(free_page);
                    }
                },
                None => {},
            }
        }
        // Remove the translations from the page table.
        let root_pagetable = self.vm_manager.get_root_page_table().unwrap();
        for p in 0..num_of_pages {
            let vaddr = (page + p) * PAGE_SIZE;
            root_pagetable.unmap(vaddr);
        }
    }

    pub fn allocate_text_pages(&mut self, vaddr: usize, num_of_pages: usize) -> Result<VirtualMemoryMap, &'static str> {
        let permissions = VirtualMemoryRegion::Text.default_permissions();
        let res = self.allocate_pages(vaddr, num_of_pages, permissions)?;
        self.text_size += num_of_pages * PAGE_SIZE;
        Ok(res)
    }

    pub fn free_text_pages(&mut self, vaddr: usize, num_of_pages: usize) {
        self.free_pages(vaddr, num_of_pages);
        self.text_size -= num_of_pages * PAGE_SIZE;
    }

    pub fn allocate_stack_pages(&mut self, vaddr: usize, num_of_pages: usize) -> Result<VirtualMemoryMap, &'static str> {
        let permissions = VirtualMemoryRegion::Stack.default_permissions();
        let res = self.allocate_pages(vaddr, num_of_pages, permissions)?;
        self.stack_size += num_of_pages * PAGE_SIZE;
        Ok(res)
    }

    pub fn free_stack_pages(&mut self, vaddr: usize, num_of_pages: usize) {
        self.free_pages(vaddr, num_of_pages);
        self.stack_size -= num_of_pages * PAGE_SIZE;
    }

    pub fn allocate_data_pages(&mut self, vaddr: usize, num_of_pages: usize) -> Result<VirtualMemoryMap, &'static str> {
        let permissions = VirtualMemoryRegion::Data.default_permissions();
        let res = self.allocate_pages(vaddr, num_of_pages, permissions)?;
        self.data_size += num_of_pages * PAGE_SIZE;
        Ok(res)
    }

    pub fn free_data_pages(&mut self, vaddr: usize, num_of_pages: usize) {
        self.free_pages(vaddr, num_of_pages);
        self.data_size -= num_of_pages * PAGE_SIZE;
    }

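    /// Builds a memory map for a guard region at `vaddr`. Guard pages have no
    /// physical backing; note that the returned map is not added to the
    /// task's memory manager by this method.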
    pub fn allocate_guard_pages(&mut self, vaddr: usize, num_of_pages: usize) -> Result<VirtualMemoryMap, &'static str> {
        let permissions = VirtualMemoryRegion::Guard.default_permissions();
        let mmap = VirtualMemoryMap {
            pmarea: MemoryArea {
                start: 0,
                end: 0,
            },
            vmarea: MemoryArea {
                start: vaddr,
                end: vaddr + num_of_pages * PAGE_SIZE - 1,
            },
            permissions,
            is_shared: VirtualMemoryRegion::Guard.is_shareable(),
        };
        Ok(mmap)
    }

    pub fn add_managed_page(&mut self, page: ManagedPage) {
        self.managed_pages.push(page);
    }

    fn get_managed_page(&self, vaddr: usize) -> Option<&ManagedPage> {
        self.managed_pages.iter().find(|page| page.vaddr == vaddr)
    }

    fn remove_managed_page(&mut self, vaddr: usize) -> Option<Box<Page>> {
        let idx = self.managed_pages.iter().position(|page| page.vaddr == vaddr)?;
        Some(self.managed_pages.remove(idx).page)
    }

    pub fn set_entry_point(&mut self, entry: usize) {
        self.vcpu.set_pc(entry as u64);
    }

    pub fn get_parent_id(&self) -> Option<usize> {
        self.parent_id
    }

    pub fn set_parent_id(&mut self, parent_id: usize) {
        self.parent_id = Some(parent_id);
    }

    pub fn add_child(&mut self, child_id: usize) {
        if !self.children.contains(&child_id) {
            self.children.push(child_id);
        }
    }

    pub fn remove_child(&mut self, child_id: usize) -> bool {
        if let Some(pos) = self.children.iter().position(|&id| id == child_id) {
            self.children.remove(pos);
            true
        } else {
            false
        }
    }

    pub fn get_children(&self) -> &Vec<usize> {
        &self.children
    }

    pub fn set_exit_status(&mut self, status: i32) {
        self.exit_status = Some(status);
    }

    pub fn get_exit_status(&self) -> Option<i32> {
        self.exit_status
    }

    pub fn get_fd_table(&self) -> &Vec<usize> {
        &self.fd_table
    }

    pub fn get_file(&self, fd: usize) -> Option<&File> {
        if fd < NUM_OF_FDS {
            self.files[fd].as_ref()
        } else {
            None
        }
    }

    pub fn get_mut_file(&mut self, fd: usize) -> Option<&mut File> {
        if fd < NUM_OF_FDS {
            self.files[fd].as_mut()
        } else {
            None
        }
    }

    pub fn set_file(&mut self, fd: usize, file: File) -> Result<(), &'static str> {
        if fd < NUM_OF_FDS {
            self.files[fd] = Some(file);
            Ok(())
        } else {
            Err("File descriptor out of bounds")
        }
    }

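    /// Stores `file` in the lowest free descriptor slot and returns its
    /// number, or an error if all descriptors are in use.
    ///
    /// Illustrative usage (a sketch; assumes `file` was opened elsewhere):
    ///
    /// ```ignore
    /// let fd = task.add_file(file)?;
    /// // ... use task.get_file(fd) / task.get_mut_file(fd) ...
    /// task.remove_file(fd)?;
    /// ```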
    pub fn add_file(&mut self, file: File) -> Result<usize, &'static str> {
        if let Some(fd) = self.allocate_fd() {
            self.files[fd] = Some(file);
            Ok(fd)
        } else {
            Err("File descriptor table is full")
        }
    }

    pub fn remove_file(&mut self, fd: usize) -> Result<(), &'static str> {
        if fd < NUM_OF_FDS {
            if self.files[fd].is_none() {
                return Err("File descriptor is already empty");
            }
            self.files[fd] = None;
            // Return the descriptor number to the free stack for reuse.
            self.fd_table.push(fd);
            Ok(())
        } else {
            Err("File descriptor out of bounds")
        }
    }

    fn allocate_fd(&mut self) -> Option<usize> {
        self.fd_table.pop()
    }

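    /// Creates a child task that duplicates this task's address space,
    /// registers, descriptor table, and working directory (fork semantics).
    /// Shared mappings are aliased to the same physical pages; private
    /// mappings are copied page by page.
    ///
    /// Illustrative usage (a sketch; assumes an initialized user task):
    ///
    /// ```ignore
    /// let child = parent.clone_task()?;
    /// assert_eq!(child.get_parent_id(), Some(parent.get_id()));
    /// ```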
    pub fn clone_task(&mut self) -> Result<Task, &'static str> {
        let mut child = Task::new(
            self.name.clone(),
            self.priority,
            self.task_type
        );

        match self.task_type {
            TaskType::Kernel => {
                child.init();
            },
            TaskType::User => {
                use crate::arch::vm::alloc_virtual_address_space;
                // Give the child its own address space.
                let asid = alloc_virtual_address_space();
                child.vm_manager.set_asid(asid);
                child.state = TaskState::Ready;
            }
        }

        for mmap in self.vm_manager.get_memmap() {
            let num_pages = (mmap.vmarea.end - mmap.vmarea.start + 1 + PAGE_SIZE - 1) / PAGE_SIZE;
            let vaddr = mmap.vmarea.start;

            if num_pages > 0 {
                if mmap.is_shared {
                    // Shared mappings alias the parent's physical pages.
                    let shared_mmap = VirtualMemoryMap {
                        pmarea: mmap.pmarea,
                        vmarea: mmap.vmarea,
                        permissions: mmap.permissions,
                        is_shared: true,
                    };
                    child.vm_manager.add_memory_map(shared_mmap)
                        .map_err(|_| "Failed to add shared memory map to child task")?;

                    // Map the page at the initial user stack pointer up front.
                    if mmap.vmarea.start == 0xffff_ffff_ffff_f000 {
                        let root_pagetable = child.vm_manager.get_root_page_table().unwrap();
                        root_pagetable.map_memory_area(shared_mmap)?;
                    }

                } else {
                    // Private mappings get fresh physical pages with the
                    // parent's contents copied in.
                    let permissions = mmap.permissions;
                    let pages = allocate_raw_pages(num_pages);
                    let size = num_pages * PAGE_SIZE;
                    let paddr = pages as usize;
                    let new_mmap = VirtualMemoryMap {
                        pmarea: MemoryArea {
                            start: paddr,
                            end: paddr + (size - 1),
                        },
                        vmarea: MemoryArea {
                            start: vaddr,
                            end: vaddr + (size - 1),
                        },
                        permissions,
                        is_shared: false,
                    };

                    for i in 0..num_pages {
                        let src_page_addr = mmap.pmarea.start + i * PAGE_SIZE;
                        let dst_page_addr = new_mmap.pmarea.start + i * PAGE_SIZE;
                        unsafe {
                            core::ptr::copy_nonoverlapping(
                                src_page_addr as *const u8,
                                dst_page_addr as *mut u8,
                                PAGE_SIZE
                            );
                        }
                        child.add_managed_page(ManagedPage {
                            vaddr: new_mmap.vmarea.start + i * PAGE_SIZE,
                            page: unsafe { Box::from_raw(pages.wrapping_add(i)) },
                        });
                    }
                    child.vm_manager.add_memory_map(new_mmap)
                        .map_err(|_| "Failed to add memory map to child task")?;
                }
            }
        }
        child.vcpu.regs = self.vcpu.regs.clone();

        child.stack_size = self.stack_size;
        child.data_size = self.data_size;
        child.text_size = self.text_size;
        child.max_stack_size = self.max_stack_size;
        child.max_data_size = self.max_data_size;
        child.max_text_size = self.max_text_size;

        child.entry = self.entry;
        child.vcpu.set_pc(self.vcpu.get_pc());

        child.fd_table = self.fd_table.clone();
        child.files = self.files.clone();

        child.vfs = self.vfs.clone();
        child.cwd = self.cwd.clone();

        child.state = self.state;

        child.set_parent_id(self.id);
        self.add_child(child.get_id());

        Ok(child)
    }

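    /// Marks the task as exited with `status`. If a live parent exists the
    /// task becomes a zombie until the parent reaps it via [`Task::wait`];
    /// otherwise it is terminated immediately.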
    pub fn exit(&mut self, status: i32) {
        match self.parent_id {
            Some(parent_id) => {
                // No living parent to reap us; terminate outright.
                if get_scheduler().get_task_by_id(parent_id).is_none() {
                    self.state = TaskState::Terminated;
                    return;
                }
                self.set_exit_status(status);
                self.state = TaskState::Zombie;
            },
            None => {
                self.state = TaskState::Terminated;
            }
        }
    }

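    /// Reaps a zombie child, returning its exit status and removing it from
    /// this task's children. Fails if `child_id` is not a child, has not
    /// exited yet, or no longer exists in the scheduler.
    ///
    /// Illustrative usage (a sketch; assumes `child_id` has already exited):
    ///
    /// ```ignore
    /// match parent.wait(child_id) {
    ///     Ok(status) => { /* child reaped with `status` */ },
    ///     Err(e) => { /* see WaitError::message() */ },
    /// }
    /// ```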
    pub fn wait(&mut self, child_id: usize) -> Result<i32, WaitError> {
        if !self.children.contains(&child_id) {
            return Err(WaitError::NoSuchChild("No such child task".to_string()));
        }

        if let Some(child_task) = get_scheduler().get_task_by_id(child_id) {
            if child_task.get_state() == TaskState::Zombie {
                let status = child_task.get_exit_status().unwrap_or(-1);
                child_task.set_state(TaskState::Terminated);
                self.remove_child(child_id);
                Ok(status)
            } else {
                Err(WaitError::ChildNotExited("Child has not exited or is not a zombie".to_string()))
            }
        } else {
            Err(WaitError::ChildTaskNotFound("Child task not found".to_string()))
        }
    }
}

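/// Errors returned by [`Task::wait`].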
pub enum WaitError {
    NoSuchChild(String),
    ChildNotExited(String),
    ChildTaskNotFound(String),
}

impl WaitError {
    pub fn message(&self) -> &str {
        match self {
            WaitError::NoSuchChild(msg) => msg,
            WaitError::ChildNotExited(msg) => msg,
            WaitError::ChildTaskNotFound(msg) => msg,
        }
    }
}

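/// Creates a kernel task that will run `func` when scheduled.
///
/// Illustrative usage (a sketch; assumes the scheduler is running):
///
/// ```ignore
/// fn worker() { /* kernel work */ }
/// let task = new_kernel_task("worker".to_string(), 0, worker);
/// ```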
pub fn new_kernel_task(name: String, priority: u32, func: fn()) -> Task {
    let mut task = Task::new(name, priority, TaskType::Kernel);
    task.entry = func as usize;
    task
}

/// Creates an uninitialized user task; call [`Task::init`] and load a
/// program image before scheduling it.
pub fn new_user_task(name: String, priority: u32) -> Task {
    Task::new(name, priority, TaskType::User)
}

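/// Returns the task currently running on this CPU, if any.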
pub fn mytask() -> Option<&'static mut Task> {
    let cpu = get_cpu();
    get_scheduler().get_current_task(cpu.get_cpuid())
}

#[cfg(test)]
mod tests {
    use alloc::string::ToString;

    #[test_case]
    fn test_set_brk() {
        let mut task = super::new_user_task("Task0".to_string(), 0);
        task.init();
        assert_eq!(task.get_brk(), 0);
        task.set_brk(0x1000).unwrap();
        assert_eq!(task.get_brk(), 0x1000);
        task.set_brk(0x2000).unwrap();
        assert_eq!(task.get_brk(), 0x2000);
        task.set_brk(0x1008).unwrap();
        assert_eq!(task.get_brk(), 0x1008);
        task.set_brk(0x1000).unwrap();
        assert_eq!(task.get_brk(), 0x1000);
    }

    #[test_case]
    fn test_task_parent_child_relationship() {
        let mut parent_task = super::new_user_task("ParentTask".to_string(), 0);
        parent_task.init();

        let mut child_task = super::new_user_task("ChildTask".to_string(), 0);
        child_task.init();

        child_task.set_parent_id(parent_task.get_id());
        parent_task.add_child(child_task.get_id());

        assert_eq!(child_task.get_parent_id(), Some(parent_task.get_id()));
        assert!(parent_task.get_children().contains(&child_task.get_id()));

        assert!(parent_task.remove_child(child_task.get_id()));
        assert!(!parent_task.get_children().contains(&child_task.get_id()));
    }

    #[test_case]
    fn test_task_exit_status() {
        let mut task = super::new_user_task("TaskWithExitStatus".to_string(), 0);
        task.init();

        assert_eq!(task.get_exit_status(), None);

        task.set_exit_status(0);
        assert_eq!(task.get_exit_status(), Some(0));

        task.set_exit_status(1);
        assert_eq!(task.get_exit_status(), Some(1));
    }

    #[test_case]
    fn test_clone_task_memory_copy() {
        let mut parent_task = super::new_user_task("ParentTask".to_string(), 0);
        parent_task.init();

        let vaddr = 0x1000;
        let num_pages = 2;
        let mmap = parent_task.allocate_data_pages(vaddr, num_pages).unwrap();

        // Write a known pattern into the parent's pages.
        let test_data: [u8; 8] = [0x12, 0x34, 0x56, 0x78, 0x9A, 0xBC, 0xDE, 0xF0];
        unsafe {
            let dst_ptr = mmap.pmarea.start as *mut u8;
            core::ptr::copy_nonoverlapping(test_data.as_ptr(), dst_ptr, test_data.len());
        }

        let parent_memmap_count = parent_task.vm_manager.get_memmap().len();
        let parent_id = parent_task.get_id();

        let child_task = parent_task.clone_task().unwrap();

        let child_memmap_count = child_task.vm_manager.get_memmap().len();

        assert_eq!(child_memmap_count, parent_memmap_count,
            "Child should have the same number of memory maps as parent: child={}, parent={}",
            child_memmap_count, parent_memmap_count);

        assert_eq!(child_task.get_parent_id(), Some(parent_id));
        assert!(parent_task.get_children().contains(&child_task.get_id()));

        assert_eq!(child_task.stack_size, parent_task.stack_size);
        assert_eq!(child_task.data_size, parent_task.data_size);
        assert_eq!(child_task.text_size, parent_task.text_size);

        let child_memmaps = child_task.vm_manager.get_memmap();
        let child_mmap = child_memmaps.iter()
            .find(|mmap| mmap.vmarea.start == vaddr && mmap.vmarea.end == vaddr + num_pages * crate::environment::PAGE_SIZE - 1)
            .expect("Test memory map not found in child task");

        let parent_memmaps = parent_task.vm_manager.get_memmap();
        let parent_test_mmap = parent_memmaps.iter()
            .find(|mmap| mmap.vmarea.start == vaddr && mmap.vmarea.end == vaddr + num_pages * crate::environment::PAGE_SIZE - 1)
            .expect("Test memory map not found in parent task");

        assert_eq!(child_mmap.vmarea.start, parent_test_mmap.vmarea.start);
        assert_eq!(child_mmap.vmarea.end, parent_test_mmap.vmarea.end);
        assert_eq!(child_mmap.permissions, parent_test_mmap.permissions);

        unsafe {
            let parent_ptr = mmap.pmarea.start as *const u8;
            let child_ptr = child_mmap.pmarea.start as *const u8;

            assert_ne!(parent_ptr, child_ptr, "Parent and child should have different physical memory");

            for i in 0..test_data.len() {
                let parent_byte = *parent_ptr.offset(i as isize);
                let child_byte = *child_ptr.offset(i as isize);
                assert_eq!(parent_byte, child_byte, "Data mismatch at offset {}", i);
            }
        }

        // Writes through the parent must not be visible in the child.
        unsafe {
            let parent_ptr = mmap.pmarea.start as *mut u8;
            let original_value = *parent_ptr;
            *parent_ptr = 0xFF;

            let child_ptr = child_mmap.pmarea.start as *const u8;
            let child_first_byte = *child_ptr;

            assert_eq!(child_first_byte, original_value, "Child memory should be independent from parent");
        }

        assert_eq!(child_task.vcpu.get_pc(), parent_task.vcpu.get_pc());

        assert_eq!(child_task.entry, parent_task.entry);

        assert_eq!(child_task.state, parent_task.state);

        assert!(child_task.managed_pages.len() >= num_pages,
            "Child should have at least the test pages in managed pages");
    }

    #[test_case]
    fn test_clone_task_stack_copy() {
        let mut parent_task = super::new_user_task("ParentWithStack".to_string(), 0);
        parent_task.init();

        let stack_mmap = parent_task.vm_manager.get_memmap().iter()
            .find(|mmap| {
                use crate::vm::vmem::VirtualMemoryRegion;
                mmap.vmarea.end == crate::environment::USER_STACK_TOP - 1 &&
                mmap.permissions == VirtualMemoryRegion::Stack.default_permissions()
            })
            .expect("Stack memory map not found in parent task")
            .clone();

        // Write a known pattern into the parent's stack.
        let stack_test_data: [u8; 16] = [
            0xAA, 0xBB, 0xCC, 0xDD, 0xEE, 0xFF, 0x11, 0x22,
            0x33, 0x44, 0x55, 0x66, 0x77, 0x88, 0x99, 0x00
        ];
        unsafe {
            let stack_ptr = (stack_mmap.pmarea.start + crate::environment::PAGE_SIZE) as *mut u8;
            core::ptr::copy_nonoverlapping(stack_test_data.as_ptr(), stack_ptr, stack_test_data.len());
        }

        let child_task = parent_task.clone_task().unwrap();

        let child_stack_mmap = child_task.vm_manager.get_memmap().iter()
            .find(|mmap| {
                use crate::vm::vmem::VirtualMemoryRegion;
                mmap.vmarea.start == stack_mmap.vmarea.start &&
                mmap.vmarea.end == stack_mmap.vmarea.end &&
                mmap.permissions == VirtualMemoryRegion::Stack.default_permissions()
            })
            .expect("Stack memory map not found in child task");

        unsafe {
            let parent_stack_ptr = (stack_mmap.pmarea.start + crate::environment::PAGE_SIZE) as *const u8;
            let child_stack_ptr = (child_stack_mmap.pmarea.start + crate::environment::PAGE_SIZE) as *const u8;

            assert_ne!(parent_stack_ptr, child_stack_ptr,
                "Parent and child should have different stack physical memory");

            for i in 0..stack_test_data.len() {
                let parent_byte = *parent_stack_ptr.offset(i as isize);
                let child_byte = *child_stack_ptr.offset(i as isize);
                assert_eq!(parent_byte, child_byte,
                    "Stack data mismatch at offset {}: parent={:#x}, child={:#x}",
                    i, parent_byte, child_byte);
            }
        }

        // Writes through the parent stack must not be visible in the child.
        unsafe {
            let parent_stack_ptr = (stack_mmap.pmarea.start + crate::environment::PAGE_SIZE) as *mut u8;
            let original_value = *parent_stack_ptr;
            *parent_stack_ptr = 0xFE;

            let child_stack_ptr = (child_stack_mmap.pmarea.start + crate::environment::PAGE_SIZE) as *const u8;
            let child_first_byte = *child_stack_ptr;

            assert_eq!(child_first_byte, original_value,
                "Child stack should be independent from parent stack");
        }

        assert_eq!(child_task.stack_size, parent_task.stack_size,
            "Child and parent should have the same stack size");
    }

    #[test_case]
    fn test_clone_task_shared_memory() {
        use crate::vm::vmem::{VirtualMemoryMap, MemoryArea, VirtualMemoryPermission};
        use crate::mem::page::allocate_raw_pages;
        use crate::environment::PAGE_SIZE;

        let mut parent_task = super::new_user_task("ParentWithShared".to_string(), 0);
        parent_task.init();

        let shared_vaddr = 0x5000;
        let num_pages = 1;
        let pages = allocate_raw_pages(num_pages);
        let paddr = pages as usize;

        let shared_mmap = VirtualMemoryMap {
            pmarea: MemoryArea {
                start: paddr,
                end: paddr + PAGE_SIZE - 1,
            },
            vmarea: MemoryArea {
                start: shared_vaddr,
                end: shared_vaddr + PAGE_SIZE - 1,
            },
            permissions: VirtualMemoryPermission::Read as usize | VirtualMemoryPermission::Write as usize,
            is_shared: true,
        };

        parent_task.vm_manager.add_memory_map(shared_mmap).unwrap();

        let test_data: [u8; 8] = [0xAA, 0xBB, 0xCC, 0xDD, 0xEE, 0xFF, 0x11, 0x22];
        unsafe {
            let shared_ptr = paddr as *mut u8;
            core::ptr::copy_nonoverlapping(test_data.as_ptr(), shared_ptr, test_data.len());
        }

        let child_task = parent_task.clone_task().unwrap();

        let child_shared_mmap = child_task.vm_manager.get_memmap().iter()
            .find(|mmap| mmap.vmarea.start == shared_vaddr && mmap.is_shared)
            .expect("Shared memory map not found in child task");

        assert_eq!(child_shared_mmap.pmarea.start, shared_mmap.pmarea.start,
            "Shared memory should have the same physical address in parent and child");

        assert_eq!(child_shared_mmap.vmarea.start, shared_mmap.vmarea.start);
        assert_eq!(child_shared_mmap.vmarea.end, shared_mmap.vmarea.end);

        assert!(child_shared_mmap.is_shared, "Shared memory should remain marked as shared");

        // A write through the child's mapping must be visible to the parent.
        unsafe {
            let child_shared_ptr = child_shared_mmap.pmarea.start as *mut u8;
            let original_value = *child_shared_ptr;
            *child_shared_ptr = 0xFF;

            let parent_shared_ptr = shared_mmap.pmarea.start as *const u8;
            let parent_first_byte = *parent_shared_ptr;

            assert_eq!(parent_first_byte, 0xFF,
                "Parent should see changes made through child's shared memory reference");

            *child_shared_ptr = original_value;
        }

        unsafe {
            let child_ptr = child_shared_mmap.pmarea.start as *const u8;
            let parent_ptr = shared_mmap.pmarea.start as *const u8;

            for i in 0..test_data.len() {
                let parent_byte = *parent_ptr.offset(i as isize);
                let child_byte = *child_ptr.offset(i as isize);
                assert_eq!(parent_byte, child_byte,
                    "Shared memory data should be identical from both parent and child views");
            }
        }
    }
}