pub mod syscall;
pub mod elf_loader;

extern crate alloc;

use alloc::{boxed::Box, string::{String, ToString}, sync::Arc, vec::Vec};
use spin::Mutex;

use crate::{arch::{get_cpu, vcpu::Vcpu, vm::alloc_virtual_address_space}, environment::{DEAFAULT_MAX_TASK_DATA_SIZE, DEAFAULT_MAX_TASK_STACK_SIZE, DEAFAULT_MAX_TASK_TEXT_SIZE, KERNEL_VM_STACK_END, PAGE_SIZE}, fs::VfsManager, mem::page::{allocate_raw_pages, free_boxed_page, Page}, object::handle::HandleTable, sched::scheduler::get_scheduler, vm::{manager::VirtualMemoryManager, user_kernel_vm_init, user_vm_init, vmem::{MemoryArea, VirtualMemoryMap, VirtualMemoryRegion}}};
use crate::abi::{scarlet::ScarletAbi, AbiModule};
use crate::sync::waker::Waker;
use alloc::collections::BTreeMap;
use spin::Once;

/// Wakers for tasks blocked waiting on a specific task ID.
static TASK_WAKERS: Once<Mutex<BTreeMap<usize, Waker>>> = Once::new();

/// Wakers for parents blocked waiting on their children.
static PARENT_WAKERS: Once<Mutex<BTreeMap<usize, Waker>>> = Once::new();

fn init_task_wakers() -> Mutex<BTreeMap<usize, Waker>> {
    Mutex::new(BTreeMap::new())
}

fn init_parent_wakers() -> Mutex<BTreeMap<usize, Waker>> {
    Mutex::new(BTreeMap::new())
}

/// Returns the waker associated with `task_id`, creating it on first use.
/// The waker's name is leaked to obtain the `&'static str` that `Waker` expects.
pub fn get_task_waker(task_id: usize) -> &'static Waker {
    let wakers_mutex = TASK_WAKERS.call_once(init_task_wakers);
    let mut wakers = wakers_mutex.lock();
    if !wakers.contains_key(&task_id) {
        let waker_name = alloc::format!("task_{}", task_id);
        let static_name = Box::leak(waker_name.into_boxed_str());
        wakers.insert(task_id, Waker::new_interruptible(static_name));
    }
    // SAFETY: entries are only removed via cleanup_task_waker, so the returned
    // reference stays valid until the task is reaped; callers must not hold it
    // past that point.
    unsafe {
        let waker_ptr = wakers.get(&task_id).unwrap() as *const Waker;
        &*waker_ptr
    }
}

/// Returns the waker a parent blocks on while waiting for its children,
/// creating it on first use.
pub fn get_parent_waker(parent_id: usize) -> &'static Waker {
    let wakers_mutex = PARENT_WAKERS.call_once(init_parent_wakers);
    let mut wakers = wakers_mutex.lock();

    if !wakers.contains_key(&parent_id) {
        let waker_name = alloc::format!("parent_waker_{}", parent_id);
        let static_name = Box::leak(waker_name.into_boxed_str());
        wakers.insert(parent_id, Waker::new_interruptible(static_name));
    }

    // SAFETY: entries are only removed via cleanup_parent_waker; callers must
    // not use the returned reference after that point.
    unsafe {
        let waker_ptr = wakers.get(&parent_id).unwrap() as *const Waker;
        &*waker_ptr
    }
}

/// Wakes every waiter blocked on `task_id`'s waker, if one exists.
pub fn wake_task_waiters(task_id: usize) {
    let wakers_mutex = TASK_WAKERS.call_once(init_task_wakers);
    let wakers = wakers_mutex.lock();
    if let Some(waker) = wakers.get(&task_id) {
        waker.wake_all();
    }
}

/// Wakes a parent blocked waiting on its children, if its waker exists.
pub fn wake_parent_waiters(parent_id: usize) {
    let wakers_mutex = PARENT_WAKERS.call_once(init_parent_wakers);
    let wakers = wakers_mutex.lock();
    if let Some(waker) = wakers.get(&parent_id) {
        waker.wake_all();
    }
}

/// Removes the waker for `task_id`, typically once the task is reaped.
pub fn cleanup_task_waker(task_id: usize) {
    let wakers_mutex = TASK_WAKERS.call_once(init_task_wakers);
    let mut wakers = wakers_mutex.lock();
    wakers.remove(&task_id);
}

/// Removes the waker for `parent_id`, typically once the parent exits.
pub fn cleanup_parent_waker(parent_id: usize) {
    let wakers_mutex = PARENT_WAKERS.call_once(init_parent_wakers);
    let mut wakers = wakers_mutex.lock();
    wakers.remove(&parent_id);
}
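
// A minimal lifecycle sketch for the waker registry above. The blocking call
// is hypothetical: the wait-side API of `Waker` lives in crate::sync::waker
// and is not shown in this module.
//
//     let waker = get_task_waker(child_id);
//     // waker.wait();                  // hypothetical: block until woken
//     // ... elsewhere, when the child exits:
//     wake_task_waiters(child_id);      // wakes all waiters via wake_all()
//     cleanup_task_waker(child_id);     // drop the registry entry once reaped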

/// Whether a blocked task may be woken before its wait condition holds.
#[derive(Debug, PartialEq, Clone, Copy)]
pub enum BlockedType {
    /// The sleep can be interrupted (e.g. by a signal) before the condition holds.
    Interruptible,
    /// The sleep ends only when the wait condition holds.
    Uninterruptible,
}

#[derive(Debug, PartialEq, Clone, Copy)]
pub enum TaskState {
    NotInitialized,
    Ready,
    Running,
    Blocked(BlockedType),
    /// Exited but not yet reaped by the parent (see `Task::exit` / `Task::wait`).
    Zombie,
    Terminated,
}

#[derive(Debug, PartialEq, Clone, Copy)]
pub enum TaskType {
    Kernel,
    User,
}

pub struct Task {
    id: usize,
    pub name: String,
    pub priority: u32,
    pub vcpu: Vcpu,
    pub state: TaskState,
    pub task_type: TaskType,
    pub entry: usize,
    /// Program break (end of the data segment); `None` until first set.
    pub brk: Option<usize>,
    pub stack_size: usize,
    pub data_size: usize,
    pub text_size: usize,
    pub max_stack_size: usize,
    pub max_data_size: usize,
    pub max_text_size: usize,
    pub vm_manager: VirtualMemoryManager,
    /// Pages owned by this task, freed when the task is destroyed.
    pub managed_pages: Vec<ManagedPage>,
    parent_id: Option<usize>,
    children: Vec<usize>,
    exit_status: Option<i32>,
    pub abi: Option<Box<dyn AbiModule>>,

    /// Current working directory, if set.
    pub cwd: Option<String>,

    /// Per-task VFS manager, shared with the parent when cloned with `Fs`.
    pub vfs: Option<Arc<VfsManager>>,

    pub handle_table: HandleTable,
}

/// A page owned by a task, recorded with the virtual address it backs.
#[derive(Debug, Clone)]
pub struct ManagedPage {
    pub vaddr: usize,
    pub page: Box<Page>,
}

/// Individual clone flags (bit positions in `CloneFlags`).
pub enum CloneFlagsDef {
    /// Share the virtual address space with the parent.
    Vm = 0b00000001,
    /// Share filesystem state (VFS manager and cwd) with the parent.
    Fs = 0b00000010,
    /// Duplicate the parent's handle table into the child.
    Files = 0b00000100,
}

#[derive(Debug, Clone, Copy)]
pub struct CloneFlags {
    raw: u64,
}

impl CloneFlags {
    pub fn new() -> Self {
        CloneFlags { raw: 0 }
    }

    pub fn from_raw(raw: u64) -> Self {
        CloneFlags { raw }
    }

    pub fn set(&mut self, flag: CloneFlagsDef) {
        self.raw |= flag as u64;
    }

    pub fn clear(&mut self, flag: CloneFlagsDef) {
        self.raw &= !(flag as u64);
    }

    pub fn is_set(&self, flag: CloneFlagsDef) -> bool {
        (self.raw & (flag as u64)) != 0
    }

    pub fn get_raw(&self) -> u64 {
        self.raw
    }
}

impl Default for CloneFlags {
    /// The default clone shares filesystem state and handles, but not the VM.
    fn default() -> Self {
        let raw = CloneFlagsDef::Fs as u64 | CloneFlagsDef::Files as u64;
        CloneFlags { raw }
    }
}
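
// A short usage sketch for `CloneFlags`, using only the API defined above:
//
//     let mut flags = CloneFlags::default();   // Fs | Files
//     flags.set(CloneFlagsDef::Vm);            // additionally share the VM
//     assert!(flags.is_set(CloneFlagsDef::Vm));
//     flags.clear(CloneFlagsDef::Files);
//     assert_eq!(flags.get_raw(), CloneFlagsDef::Vm as u64 | CloneFlagsDef::Fs as u64);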

/// Monotonic source of task IDs, starting at 1.
static TASK_ID: Mutex<usize> = Mutex::new(1);

impl Task {
    pub fn new(name: String, priority: u32, task_type: TaskType) -> Self {
        let mut taskid = TASK_ID.lock();
        let task = Task {
            id: *taskid,
            name,
            priority,
            vcpu: Vcpu::new(match task_type {
                TaskType::Kernel => crate::arch::vcpu::Mode::Kernel,
                TaskType::User => crate::arch::vcpu::Mode::User,
            }),
            state: TaskState::NotInitialized,
            task_type,
            entry: 0,
            brk: None,
            stack_size: 0,
            data_size: 0,
            text_size: 0,
            max_stack_size: DEAFAULT_MAX_TASK_STACK_SIZE,
            max_data_size: DEAFAULT_MAX_TASK_DATA_SIZE,
            max_text_size: DEAFAULT_MAX_TASK_TEXT_SIZE,
            vm_manager: VirtualMemoryManager::new(),
            managed_pages: Vec::new(),
            parent_id: None,
            children: Vec::new(),
            exit_status: None,
            abi: Some(Box::new(ScarletAbi::default())),
            cwd: None,
            vfs: None,
            handle_table: HandleTable::new(),
        };

        *taskid += 1;
        task
    }

    /// Initializes the task's address space and stack pointer, then marks it Ready.
    pub fn init(&mut self) {
        match self.task_type {
            TaskType::Kernel => {
                user_kernel_vm_init(self);
                self.vcpu.set_sp(KERNEL_VM_STACK_END + 1);
            },
            TaskType::User => {
                user_vm_init(self);
                self.vcpu.set_sp(0xffff_ffff_ffff_f000);
            }
        }

        self.state = TaskState::Ready;
    }

    pub fn get_id(&self) -> usize {
        self.id
    }

    pub fn set_state(&mut self, state: TaskState) {
        self.state = state;
    }

    pub fn get_state(&self) -> TaskState {
        self.state
    }

    /// Total size of the task's text, data, and stack segments in bytes.
    pub fn get_size(&self) -> usize {
        self.stack_size + self.text_size + self.data_size
    }

    /// Returns the program break, defaulting to the end of text + data
    /// if it has never been set.
    pub fn get_brk(&self) -> usize {
        self.brk.unwrap_or(self.text_size + self.data_size)
    }

    /// Moves the program break, allocating or freeing whole pages as needed.
    pub fn set_brk(&mut self, brk: usize) -> Result<(), &'static str> {
        if brk < self.text_size {
            return Err("Invalid address");
        }
        let prev_brk = self.get_brk();
        if brk < prev_brk {
            // Shrinking: free the pages between the two page-aligned breaks.
            let prev_addr = (prev_brk + PAGE_SIZE - 1) & !(PAGE_SIZE - 1);
            let addr = (brk + PAGE_SIZE - 1) & !(PAGE_SIZE - 1);
            let num_of_pages = (prev_addr - addr) / PAGE_SIZE;
            self.free_data_pages(addr, num_of_pages);
        } else if brk > prev_brk {
            // Growing: allocate pages unless the range is already mapped.
            let prev_addr = (prev_brk + PAGE_SIZE - 1) & !(PAGE_SIZE - 1);
            let addr = (brk + PAGE_SIZE - 1) & !(PAGE_SIZE - 1);
            let num_of_pages = (addr - prev_addr) / PAGE_SIZE;

            if num_of_pages > 0 {
                match self.vm_manager.search_memory_map(prev_addr) {
                    Some(_) => {},
                    None => {
                        match self.allocate_data_pages(prev_addr, num_of_pages) {
                            Ok(_) => {},
                            Err(_) => return Err("Failed to allocate pages"),
                        }
                    },
                }
            }
        }
        self.brk = Some(brk);
        Ok(())
    }
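
    // A worked sketch of the page arithmetic above (assuming PAGE_SIZE = 0x1000):
    // growing from brk = 0x1000 to brk = 0x2800 rounds both breaks up, giving
    //   prev_addr = 0x1000, addr = 0x3000, so two pages are allocated at 0x1000;
    // shrinking back to brk = 0x1000 frees the same two pages starting at 0x1000.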

    /// Allocates `num_of_pages` physical pages and maps them at `vaddr`.
    ///
    /// `vaddr` must be page aligned. The pages are registered as managed pages
    /// so they are freed with the task. Panics if the map cannot be added.
    pub fn allocate_pages(&mut self, vaddr: usize, num_of_pages: usize, permissions: usize) -> Result<VirtualMemoryMap, &'static str> {
        if vaddr % PAGE_SIZE != 0 {
            return Err("Address is not page aligned");
        }

        let pages = allocate_raw_pages(num_of_pages);
        let size = num_of_pages * PAGE_SIZE;
        let paddr = pages as usize;
        let mmap = VirtualMemoryMap {
            pmarea: MemoryArea {
                start: paddr,
                end: paddr + size - 1,
            },
            vmarea: MemoryArea {
                start: vaddr,
                end: vaddr + size - 1,
            },
            permissions,
            is_shared: false,
        };
        self.vm_manager.add_memory_map(mmap).map_err(|e| panic!("Failed to add memory map: {}", e))?;

        for i in 0..num_of_pages {
            let page = unsafe { Box::from_raw(pages.wrapping_add(i)) };
            let vaddr = mmap.vmarea.start + i * PAGE_SIZE;
            self.add_managed_page(ManagedPage {
                vaddr,
                page
            });
        }

        Ok(mmap)
    }
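
    // A minimal usage sketch: map four pages of task data at 0x4000 with the
    // region's default permission bits (both APIs are defined in this crate):
    //
    //     let perms = VirtualMemoryRegion::Data.default_permissions();
    //     let mmap = task.allocate_pages(0x4000, 4, perms)?;
    //     assert_eq!(mmap.vmarea.end - mmap.vmarea.start + 1, 4 * PAGE_SIZE);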

    /// Unmaps and frees `num_of_pages` pages starting at `vaddr`.
    ///
    /// If a freed page sits in the middle of an existing map, the map is
    /// split and the surrounding pieces are re-added.
    pub fn free_pages(&mut self, vaddr: usize, num_of_pages: usize) {
        let page = vaddr / PAGE_SIZE;
        for p in 0..num_of_pages {
            let vaddr = (page + p) * PAGE_SIZE;
            match self.vm_manager.search_memory_map_idx(vaddr) {
                Some(idx) => {
                    let mmap = self.vm_manager.remove_memory_map(idx).unwrap();
                    if p == 0 && mmap.vmarea.start < vaddr {
                        // Keep the part of the map below the freed range.
                        let size = vaddr - mmap.vmarea.start;
                        let paddr = mmap.pmarea.start;
                        let mmap1 = VirtualMemoryMap {
                            pmarea: MemoryArea {
                                start: paddr,
                                end: paddr + size - 1,
                            },
                            vmarea: MemoryArea {
                                start: mmap.vmarea.start,
                                end: vaddr - 1,
                            },
                            permissions: mmap.permissions,
                            is_shared: mmap.is_shared,
                        };
                        self.vm_manager.add_memory_map(mmap1)
                            .map_err(|e| panic!("Failed to add memory map: {}", e)).unwrap();
                    }
                    if p == num_of_pages - 1 && mmap.vmarea.end > vaddr + PAGE_SIZE - 1 {
                        // Keep the part of the map above the freed range.
                        let size = mmap.vmarea.end - (vaddr + PAGE_SIZE) + 1;
                        let paddr = mmap.pmarea.start + (vaddr + PAGE_SIZE - mmap.vmarea.start);
                        let mmap2 = VirtualMemoryMap {
                            pmarea: MemoryArea {
                                start: paddr,
                                end: paddr + size - 1,
                            },
                            vmarea: MemoryArea {
                                start: vaddr + PAGE_SIZE,
                                end: mmap.vmarea.end,
                            },
                            permissions: mmap.permissions,
                            is_shared: mmap.is_shared,
                        };
                        self.vm_manager.add_memory_map(mmap2)
                            .map_err(|e| panic!("Failed to add memory map: {}", e)).unwrap();
                    }
                    // Release the backing page if this task owns it.
                    if let Some(free_page) = self.remove_managed_page(vaddr) {
                        free_boxed_page(free_page);
                    }
                },
                None => {},
            }
        }
        let root_pagetable = self.vm_manager.get_root_page_table().unwrap();
        for p in 0..num_of_pages {
            let vaddr = (page + p) * PAGE_SIZE;
            root_pagetable.unmap(vaddr);
        }
    }
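
    // A worked sketch of the split logic (assuming PAGE_SIZE = 0x1000): freeing
    // one page at 0x2000 out of a map covering 0x1000..=0x3fff removes the whole
    // map, then re-adds 0x1000..=0x1fff (below) and 0x3000..=0x3fff (above), so
    // only the freed page loses its translation and, if owned, its backing page.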

    /// Allocates text pages and accounts their size in `text_size`.
    pub fn allocate_text_pages(&mut self, vaddr: usize, num_of_pages: usize) -> Result<VirtualMemoryMap, &'static str> {
        let permissions = VirtualMemoryRegion::Text.default_permissions();
        let res = self.allocate_pages(vaddr, num_of_pages, permissions);
        if res.is_ok() {
            self.text_size += num_of_pages * PAGE_SIZE;
        }
        res
    }

    pub fn free_text_pages(&mut self, vaddr: usize, num_of_pages: usize) {
        self.free_pages(vaddr, num_of_pages);
        self.text_size -= num_of_pages * PAGE_SIZE;
    }

    /// Allocates stack pages and accounts their size in `stack_size`.
    pub fn allocate_stack_pages(&mut self, vaddr: usize, num_of_pages: usize) -> Result<VirtualMemoryMap, &'static str> {
        let permissions = VirtualMemoryRegion::Stack.default_permissions();
        let res = self.allocate_pages(vaddr, num_of_pages, permissions)?;
        self.stack_size += num_of_pages * PAGE_SIZE;
        Ok(res)
    }

    pub fn free_stack_pages(&mut self, vaddr: usize, num_of_pages: usize) {
        self.free_pages(vaddr, num_of_pages);
        self.stack_size -= num_of_pages * PAGE_SIZE;
    }

    /// Allocates data pages and accounts their size in `data_size`.
    pub fn allocate_data_pages(&mut self, vaddr: usize, num_of_pages: usize) -> Result<VirtualMemoryMap, &'static str> {
        let permissions = VirtualMemoryRegion::Data.default_permissions();
        let res = self.allocate_pages(vaddr, num_of_pages, permissions)?;
        self.data_size += num_of_pages * PAGE_SIZE;
        Ok(res)
    }

    pub fn free_data_pages(&mut self, vaddr: usize, num_of_pages: usize) {
        self.free_pages(vaddr, num_of_pages);
        self.data_size -= num_of_pages * PAGE_SIZE;
    }

    /// Builds a guard-page mapping with no physical backing.
    ///
    /// Note that the map is only constructed and returned; it is not added to
    /// the task's `vm_manager` here.
    pub fn allocate_guard_pages(&mut self, vaddr: usize, num_of_pages: usize) -> Result<VirtualMemoryMap, &'static str> {
        let permissions = VirtualMemoryRegion::Guard.default_permissions();
        let mmap = VirtualMemoryMap {
            pmarea: MemoryArea {
                start: 0,
                end: 0,
            },
            vmarea: MemoryArea {
                start: vaddr,
                end: vaddr + num_of_pages * PAGE_SIZE - 1,
            },
            permissions,
            is_shared: VirtualMemoryRegion::Guard.is_shareable(),
        };
        Ok(mmap)
    }
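
    // A minimal sketch of placing a guard page below a stack, assuming the
    // caller registers the returned map itself (`stack_base`, the lowest
    // mapped stack address, is a hypothetical variable):
    //
    //     let guard = task.allocate_guard_pages(stack_base - PAGE_SIZE, 1)?;
    //     task.vm_manager.add_memory_map(guard)
    //         .map_err(|_| "Failed to add guard page")?;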

    /// Registers a page as owned by this task.
    pub fn add_managed_page(&mut self, pages: ManagedPage) {
        self.managed_pages.push(pages);
    }

    fn get_managed_page(&self, vaddr: usize) -> Option<&ManagedPage> {
        for page in &self.managed_pages {
            if page.vaddr == vaddr {
                return Some(page);
            }
        }
        None
    }

    /// Removes and returns the managed page backing `vaddr`, if any.
    fn remove_managed_page(&mut self, vaddr: usize) -> Option<Box<Page>> {
        for i in 0..self.managed_pages.len() {
            if self.managed_pages[i].vaddr == vaddr {
                let page = self.managed_pages.remove(i);
                return Some(page.page);
            }
        }
        None
    }

    /// Sets the task's entry point by updating the vCPU program counter.
    pub fn set_entry_point(&mut self, entry: usize) {
        self.vcpu.set_pc(entry as u64);
    }

    pub fn get_parent_id(&self) -> Option<usize> {
        self.parent_id
    }

    pub fn set_parent_id(&mut self, parent_id: usize) {
        self.parent_id = Some(parent_id);
    }

    /// Records `child_id` as a child, ignoring duplicates.
    pub fn add_child(&mut self, child_id: usize) {
        if !self.children.contains(&child_id) {
            self.children.push(child_id);
        }
    }

    /// Removes `child_id` from the child list; returns whether it was present.
    pub fn remove_child(&mut self, child_id: usize) -> bool {
        if let Some(pos) = self.children.iter().position(|&id| id == child_id) {
            self.children.remove(pos);
            true
        } else {
            false
        }
    }

    pub fn get_children(&self) -> &Vec<usize> {
        &self.children
    }

    pub fn set_exit_status(&mut self, status: i32) {
        self.exit_status = Some(status);
    }

    pub fn get_exit_status(&self) -> Option<i32> {
        self.exit_status
    }

    /// Clones this task into a new child task according to `flags`.
    ///
    /// Without `CloneFlagsDef::Vm`, private mappings are copied page by page
    /// into freshly allocated memory, while shared mappings keep their physical
    /// pages. The child's registers, segment sizes, entry point, and state are
    /// copied, and the parent/child links are updated on both sides.
    pub fn clone_task(&mut self, flags: CloneFlags) -> Result<Task, &'static str> {
        let mut child = Task::new(
            self.name.clone(),
            self.priority,
            self.task_type
        );

        match self.task_type {
            TaskType::Kernel => {
                child.init();
            },
            TaskType::User => {
                if !flags.is_set(CloneFlagsDef::Vm) {
                    // Give the child its own address space.
                    let asid = alloc_virtual_address_space();
                    child.vm_manager.set_asid(asid);
                }
            }
        }

        if !flags.is_set(CloneFlagsDef::Vm) {
            for mmap in self.vm_manager.get_memmap() {
                let num_pages = (mmap.vmarea.end - mmap.vmarea.start + 1 + PAGE_SIZE - 1) / PAGE_SIZE;
                let vaddr = mmap.vmarea.start;

                if num_pages > 0 {
                    if mmap.is_shared {
                        // Shared mapping: reference the same physical pages.
                        let shared_mmap = VirtualMemoryMap {
                            pmarea: mmap.pmarea,
                            vmarea: mmap.vmarea,
                            permissions: mmap.permissions,
                            is_shared: true,
                        };
                        child.vm_manager.add_memory_map(shared_mmap)
                            .map_err(|_| "Failed to add shared memory map to child task")?;

                        if mmap.vmarea.start == 0xffff_ffff_ffff_f000 {
                            // Eagerly map the page holding the initial user stack pointer.
                            let root_pagetable = child.vm_manager.get_root_page_table().unwrap();
                            root_pagetable.map_memory_area(child.vm_manager.get_asid(), shared_mmap)?;
                        }

                    } else {
                        // Private mapping: allocate new pages and copy the contents.
                        let permissions = mmap.permissions;
                        let pages = allocate_raw_pages(num_pages);
                        let size = num_pages * PAGE_SIZE;
                        let paddr = pages as usize;
                        let new_mmap = VirtualMemoryMap {
                            pmarea: MemoryArea {
                                start: paddr,
                                end: paddr + (size - 1),
                            },
                            vmarea: MemoryArea {
                                start: vaddr,
                                end: vaddr + (size - 1),
                            },
                            permissions,
                            is_shared: false,
                        };

                        for i in 0..num_pages {
                            let src_page_addr = mmap.pmarea.start + i * PAGE_SIZE;
                            let dst_page_addr = new_mmap.pmarea.start + i * PAGE_SIZE;
                            unsafe {
                                core::ptr::copy_nonoverlapping(
                                    src_page_addr as *const u8,
                                    dst_page_addr as *mut u8,
                                    PAGE_SIZE
                                );
                            }
                            child.add_managed_page(ManagedPage {
                                vaddr: new_mmap.vmarea.start + i * PAGE_SIZE,
                                page: unsafe { Box::from_raw(pages.wrapping_add(i)) },
                            });
                        }
                        child.vm_manager.add_memory_map(new_mmap)
                            .map_err(|_| "Failed to add memory map to child task")?;
                    }
                }
            }
        }

        child.vcpu.regs = self.vcpu.regs.clone();

        child.abi = self.abi.as_ref().map(|abi| abi.clone_boxed());

        child.stack_size = self.stack_size;
        child.data_size = self.data_size;
        child.text_size = self.text_size;
        child.max_stack_size = self.max_stack_size;
        child.max_data_size = self.max_data_size;
        child.max_text_size = self.max_text_size;

        child.entry = self.entry;
        child.vcpu.set_pc(self.vcpu.get_pc());

        if flags.is_set(CloneFlagsDef::Files) {
            child.handle_table = self.handle_table.clone();
        }

        if flags.is_set(CloneFlagsDef::Fs) {
            if let Some(vfs) = &self.vfs {
                child.vfs = Some(vfs.clone());
                child.cwd = self.cwd.clone();
            } else {
                child.vfs = None;
                child.cwd = None;
            }
        }

        child.state = self.state;

        child.set_parent_id(self.id);
        self.add_child(child.get_id());

        Ok(child)
    }
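
    // A minimal fork-style usage sketch for clone_task (the default flags copy
    // private memory and share fs state and handles):
    //
    //     let mut flags = CloneFlags::default();
    //     // flags.set(CloneFlagsDef::Vm);   // uncomment to share the address space
    //     let child = parent.clone_task(flags)?;
    //     assert_eq!(child.get_parent_id(), Some(parent.get_id()));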

    /// Exits the task, closing all handles.
    ///
    /// With a live parent the task becomes a `Zombie` holding `status` until
    /// it is reaped via `wait`; otherwise it is `Terminated` immediately.
    pub fn exit(&mut self, status: i32) {
        self.handle_table.close_all();

        match self.parent_id {
            Some(parent_id) => {
                if get_scheduler().get_task_by_id(parent_id).is_none() {
                    // Parent is already gone; no one will reap us.
                    self.state = TaskState::Terminated;
                    return;
                }
                self.set_exit_status(status);
                self.state = TaskState::Zombie;
            },
            None => {
                self.state = TaskState::Terminated;
            }
        }
    }

    /// Reaps a zombie child, returning its exit status.
    ///
    /// Fails if `child_id` is not one of this task's children, if the child
    /// has not exited yet, or if the child cannot be found in the scheduler.
    pub fn wait(&mut self, child_id: usize) -> Result<i32, WaitError> {
        if !self.children.contains(&child_id) {
            return Err(WaitError::NoSuchChild("No such child task".to_string()));
        }

        if let Some(child_task) = get_scheduler().get_task_by_id(child_id) {
            if child_task.get_state() == TaskState::Zombie {
                let status = child_task.get_exit_status().unwrap_or(-1);
                child_task.set_state(TaskState::Terminated);
                self.remove_child(child_id);
                Ok(status)
            } else {
                Err(WaitError::ChildNotExited("Child has not exited or is not a zombie".to_string()))
            }
        } else {
            Err(WaitError::ChildTaskNotFound("Child task not found".to_string()))
        }
    }
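
    // A lifecycle sketch tying exit and wait together (scheduler lookups are
    // assumed to resolve; error handling elided):
    //
    //     child.exit(0);                         // child becomes Zombie
    //     let status = parent.wait(child_id)?;   // reaps it, returns 0
    //     assert_eq!(status, 0);
    //     // child is now Terminated and removed from parent.get_children()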

    /// Assigns the task's VFS manager.
    pub fn set_vfs(&mut self, vfs: Arc<VfsManager>) {
        self.vfs = Some(vfs);
    }

    pub fn get_vfs(&self) -> Option<&Arc<VfsManager>> {
        self.vfs.as_ref()
    }

    pub fn set_cwd(&mut self, cwd: String) {
        self.cwd = Some(cwd);
    }

    pub fn get_cwd(&self) -> Option<&String> {
        self.cwd.as_ref()
    }
}

/// Errors returned by [`Task::wait`].
#[derive(Debug)]
pub enum WaitError {
    NoSuchChild(String),
    ChildNotExited(String),
    ChildTaskNotFound(String),
}

impl WaitError {
    pub fn message(&self) -> &str {
        match self {
            WaitError::NoSuchChild(msg) => msg,
            WaitError::ChildNotExited(msg) => msg,
            WaitError::ChildTaskNotFound(msg) => msg,
        }
    }
}

/// Creates a kernel task whose entry point is `func`.
pub fn new_kernel_task(name: String, priority: u32, func: fn()) -> Task {
    let mut task = Task::new(name, priority, TaskType::Kernel);
    task.entry = func as usize;
    task
}

/// Creates a user task; the entry point is set later (e.g. by the ELF loader).
pub fn new_user_task(name: String, priority: u32) -> Task {
    Task::new(name, priority, TaskType::User)
}
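
// A minimal creation sketch using the constructors above; `idle_loop` is a
// hypothetical function supplied by the caller:
//
//     fn idle_loop() { loop {} }
//
//     let mut ktask = new_kernel_task("idle".to_string(), 0, idle_loop);
//     ktask.init();                  // sets up the VM and marks the task Ready
//     let mut utask = new_user_task("init".to_string(), 0);
//     utask.init();                  // entry is set later, e.g. via set_entry_point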

/// Returns the task currently running on this CPU, if any.
pub fn mytask() -> Option<&'static mut Task> {
    let cpu = get_cpu();
    get_scheduler().get_current_task(cpu.get_cpuid())
}

/// Sets the current task's working directory; returns `false` if there is no
/// current task.
pub fn set_current_task_cwd(cwd: String) -> bool {
    if let Some(task) = mytask() {
        task.set_cwd(cwd);
        true
    } else {
        false
    }
}
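
// A short usage sketch from syscall context, where a current task exists:
//
//     if let Some(task) = mytask() {
//         let cwd = task.get_cwd().cloned().unwrap_or_else(|| "/".to_string());
//         // resolve paths relative to cwd ...
//     }
//     let ok = set_current_task_cwd("/tmp".to_string());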

#[cfg(test)]
mod tests {
    use alloc::string::ToString;

    use crate::task::CloneFlags;

    #[test_case]
    fn test_set_brk() {
        let mut task = super::new_user_task("Task0".to_string(), 0);
        task.init();
        assert_eq!(task.get_brk(), 0);
        task.set_brk(0x1000).unwrap();
        assert_eq!(task.get_brk(), 0x1000);
        task.set_brk(0x2000).unwrap();
        assert_eq!(task.get_brk(), 0x2000);
        task.set_brk(0x1008).unwrap();
        assert_eq!(task.get_brk(), 0x1008);
        task.set_brk(0x1000).unwrap();
        assert_eq!(task.get_brk(), 0x1000);
    }

    #[test_case]
    fn test_task_parent_child_relationship() {
        let mut parent_task = super::new_user_task("ParentTask".to_string(), 0);
        parent_task.init();

        let mut child_task = super::new_user_task("ChildTask".to_string(), 0);
        child_task.init();

        child_task.set_parent_id(parent_task.get_id());
        parent_task.add_child(child_task.get_id());

        assert_eq!(child_task.get_parent_id(), Some(parent_task.get_id()));
        assert!(parent_task.get_children().contains(&child_task.get_id()));

        assert!(parent_task.remove_child(child_task.get_id()));
        assert!(!parent_task.get_children().contains(&child_task.get_id()));
    }

    #[test_case]
    fn test_task_exit_status() {
        let mut task = super::new_user_task("TaskWithExitStatus".to_string(), 0);
        task.init();

        assert_eq!(task.get_exit_status(), None);

        task.set_exit_status(0);
        assert_eq!(task.get_exit_status(), Some(0));

        task.set_exit_status(1);
        assert_eq!(task.get_exit_status(), Some(1));
    }

    #[test_case]
    fn test_clone_task_memory_copy() {
        let mut parent_task = super::new_user_task("ParentTask".to_string(), 0);
        parent_task.init();

        // Allocate two data pages and write a known pattern into them.
        let vaddr = 0x1000;
        let num_pages = 2;
        let mmap = parent_task.allocate_data_pages(vaddr, num_pages).unwrap();

        let test_data: [u8; 8] = [0x12, 0x34, 0x56, 0x78, 0x9A, 0xBC, 0xDE, 0xF0];
        unsafe {
            let dst_ptr = mmap.pmarea.start as *mut u8;
            core::ptr::copy_nonoverlapping(test_data.as_ptr(), dst_ptr, test_data.len());
        }

        let parent_memmap_count = parent_task.vm_manager.get_memmap().len();
        let parent_id = parent_task.get_id();

        let child_task = parent_task.clone_task(CloneFlags::default()).unwrap();

        let child_memmap_count = child_task.vm_manager.get_memmap().len();

        assert_eq!(child_memmap_count, parent_memmap_count,
            "Child should have the same number of memory maps as parent: child={}, parent={}",
            child_memmap_count, parent_memmap_count);

        assert_eq!(child_task.get_parent_id(), Some(parent_id));
        assert!(parent_task.get_children().contains(&child_task.get_id()));

        assert_eq!(child_task.stack_size, parent_task.stack_size);
        assert_eq!(child_task.data_size, parent_task.data_size);
        assert_eq!(child_task.text_size, parent_task.text_size);

        let child_memmaps = child_task.vm_manager.get_memmap();
        let child_mmap = child_memmaps.iter()
            .find(|mmap| mmap.vmarea.start == vaddr && mmap.vmarea.end == vaddr + num_pages * crate::environment::PAGE_SIZE - 1)
            .expect("Test memory map not found in child task");

        let parent_memmaps = parent_task.vm_manager.get_memmap();
        let parent_test_mmap = parent_memmaps.iter()
            .find(|mmap| mmap.vmarea.start == vaddr && mmap.vmarea.end == vaddr + num_pages * crate::environment::PAGE_SIZE - 1)
            .expect("Test memory map not found in parent task");

        assert_eq!(child_mmap.vmarea.start, parent_test_mmap.vmarea.start);
        assert_eq!(child_mmap.vmarea.end, parent_test_mmap.vmarea.end);
        assert_eq!(child_mmap.permissions, parent_test_mmap.permissions);

        // The copied pages must live at a different physical address but
        // contain identical bytes.
        unsafe {
            let parent_ptr = mmap.pmarea.start as *const u8;
            let child_ptr = child_mmap.pmarea.start as *const u8;

            assert_ne!(parent_ptr, child_ptr, "Parent and child should have different physical memory");

            for i in 0..test_data.len() {
                let parent_byte = *parent_ptr.offset(i as isize);
                let child_byte = *child_ptr.offset(i as isize);
                assert_eq!(parent_byte, child_byte, "Data mismatch at offset {}", i);
            }
        }

        // A write through the parent must not be visible to the child.
        unsafe {
            let parent_ptr = mmap.pmarea.start as *mut u8;
            let original_value = *parent_ptr;
            *parent_ptr = 0xFF;

            let child_ptr = child_mmap.pmarea.start as *const u8;
            let child_first_byte = *child_ptr;

            assert_eq!(child_first_byte, original_value, "Child memory should be independent from parent");
        }

        assert_eq!(child_task.vcpu.get_pc(), parent_task.vcpu.get_pc());

        assert_eq!(child_task.entry, parent_task.entry);

        assert_eq!(child_task.state, parent_task.state);

        assert!(child_task.managed_pages.len() >= num_pages,
            "Child should have at least the test pages in managed pages");
    }

    #[test_case]
    fn test_clone_task_stack_copy() {
        let mut parent_task = super::new_user_task("ParentWithStack".to_string(), 0);
        parent_task.init();

        // Find the parent's stack mapping by its top address and permissions.
        let stack_mmap = parent_task.vm_manager.get_memmap().iter()
            .find(|mmap| {
                use crate::vm::vmem::VirtualMemoryRegion;
                mmap.vmarea.end == crate::environment::USER_STACK_TOP - 1 &&
                mmap.permissions == VirtualMemoryRegion::Stack.default_permissions()
            })
            .expect("Stack memory map not found in parent task")
            .clone();

        let stack_test_data: [u8; 16] = [
            0xAA, 0xBB, 0xCC, 0xDD, 0xEE, 0xFF, 0x11, 0x22,
            0x33, 0x44, 0x55, 0x66, 0x77, 0x88, 0x99, 0x00
        ];
        unsafe {
            let stack_ptr = (stack_mmap.pmarea.start + crate::environment::PAGE_SIZE) as *mut u8;
            core::ptr::copy_nonoverlapping(stack_test_data.as_ptr(), stack_ptr, stack_test_data.len());
        }

        let child_task = parent_task.clone_task(CloneFlags::default()).unwrap();

        let child_stack_mmap = child_task.vm_manager.get_memmap().iter()
            .find(|mmap| {
                use crate::vm::vmem::VirtualMemoryRegion;
                mmap.vmarea.start == stack_mmap.vmarea.start &&
                mmap.vmarea.end == stack_mmap.vmarea.end &&
                mmap.permissions == VirtualMemoryRegion::Stack.default_permissions()
            })
            .expect("Stack memory map not found in child task");

        unsafe {
            let parent_stack_ptr = (stack_mmap.pmarea.start + crate::environment::PAGE_SIZE) as *const u8;
            let child_stack_ptr = (child_stack_mmap.pmarea.start + crate::environment::PAGE_SIZE) as *const u8;

            assert_ne!(parent_stack_ptr, child_stack_ptr,
                "Parent and child should have different stack physical memory");

            for i in 0..stack_test_data.len() {
                let parent_byte = *parent_stack_ptr.offset(i as isize);
                let child_byte = *child_stack_ptr.offset(i as isize);
                assert_eq!(parent_byte, child_byte,
                    "Stack data mismatch at offset {}: parent={:#x}, child={:#x}",
                    i, parent_byte, child_byte);
            }
        }

        // A write through the parent stack must not be visible to the child.
        unsafe {
            let parent_stack_ptr = (stack_mmap.pmarea.start + crate::environment::PAGE_SIZE) as *mut u8;
            let original_value = *parent_stack_ptr;
            *parent_stack_ptr = 0xFE;

            let child_stack_ptr = (child_stack_mmap.pmarea.start + crate::environment::PAGE_SIZE) as *const u8;
            let child_first_byte = *child_stack_ptr;

            assert_eq!(child_first_byte, original_value,
                "Child stack should be independent from parent stack");
        }

        assert_eq!(child_task.stack_size, parent_task.stack_size,
            "Child and parent should have the same stack size");
    }

    #[test_case]
    fn test_clone_task_shared_memory() {
        use crate::vm::vmem::{VirtualMemoryMap, MemoryArea, VirtualMemoryPermission};
        use crate::mem::page::allocate_raw_pages;
        use crate::environment::PAGE_SIZE;

        let mut parent_task = super::new_user_task("ParentWithShared".to_string(), 0);
        parent_task.init();

        // Build a one-page mapping marked as shared.
        let shared_vaddr = 0x5000;
        let num_pages = 1;
        let pages = allocate_raw_pages(num_pages);
        let paddr = pages as usize;

        let shared_mmap = VirtualMemoryMap {
            pmarea: MemoryArea {
                start: paddr,
                end: paddr + PAGE_SIZE - 1,
            },
            vmarea: MemoryArea {
                start: shared_vaddr,
                end: shared_vaddr + PAGE_SIZE - 1,
            },
            permissions: VirtualMemoryPermission::Read as usize | VirtualMemoryPermission::Write as usize,
            is_shared: true,
        };

        parent_task.vm_manager.add_memory_map(shared_mmap).unwrap();

        let test_data: [u8; 8] = [0xAA, 0xBB, 0xCC, 0xDD, 0xEE, 0xFF, 0x11, 0x22];
        unsafe {
            let shared_ptr = paddr as *mut u8;
            core::ptr::copy_nonoverlapping(test_data.as_ptr(), shared_ptr, test_data.len());
        }

        let child_task = parent_task.clone_task(CloneFlags::default()).unwrap();

        let child_shared_mmap = child_task.vm_manager.get_memmap().iter()
            .find(|mmap| mmap.vmarea.start == shared_vaddr && mmap.is_shared)
            .expect("Shared memory map not found in child task");

        assert_eq!(child_shared_mmap.pmarea.start, shared_mmap.pmarea.start,
            "Shared memory should have the same physical address in parent and child");

        assert_eq!(child_shared_mmap.vmarea.start, shared_mmap.vmarea.start);
        assert_eq!(child_shared_mmap.vmarea.end, shared_mmap.vmarea.end);

        assert!(child_shared_mmap.is_shared, "Shared memory should remain marked as shared");

        // A write through the child's mapping must be visible to the parent.
        unsafe {
            let child_shared_ptr = child_shared_mmap.pmarea.start as *mut u8;
            let original_value = *child_shared_ptr;
            *child_shared_ptr = 0xFF;

            let parent_shared_ptr = shared_mmap.pmarea.start as *const u8;
            let parent_first_byte = *parent_shared_ptr;

            assert_eq!(parent_first_byte, 0xFF,
                "Parent should see changes made through child's shared memory reference");

            *child_shared_ptr = original_value;
        }

        unsafe {
            let child_ptr = child_shared_mmap.pmarea.start as *const u8;
            let parent_ptr = shared_mmap.pmarea.start as *const u8;

            for i in 0..test_data.len() {
                let parent_byte = *parent_ptr.offset(i as isize);
                let child_byte = *child_ptr.offset(i as isize);
                assert_eq!(parent_byte, child_byte,
                    "Shared memory data should be identical from both parent and child views");
            }
        }
    }
}