kernel/task/
mod.rs

1//! Task module.
2//!
3//! The task module defines the structure and behavior of tasks in the system.
4
5pub mod syscall;
6pub mod elf_loader;
7
8extern crate alloc;
9
10use alloc::{boxed::Box, string::{String, ToString}, sync::Arc, vec::Vec};
11use spin::Mutex;
12
13use crate::{arch::{get_cpu, vcpu::Vcpu, vm::alloc_virtual_address_space}, environment::{DEAFAULT_MAX_TASK_DATA_SIZE, DEAFAULT_MAX_TASK_STACK_SIZE, DEAFAULT_MAX_TASK_TEXT_SIZE, KERNEL_VM_STACK_END, PAGE_SIZE}, fs::VfsManager, mem::page::{allocate_raw_pages, free_boxed_page, Page}, object::handle::HandleTable, sched::scheduler::get_scheduler, vm::{manager::VirtualMemoryManager, user_kernel_vm_init, user_vm_init, vmem::{MemoryArea, VirtualMemoryMap, VirtualMemoryRegion}}};
14use crate::abi::{scarlet::ScarletAbi, AbiModule};
15use crate::sync::waker::Waker;
16use alloc::collections::BTreeMap;
17use spin::Once;
18
19/// Global registry of task-specific wakers for waitpid
20static TASK_WAKERS: Once<Mutex<BTreeMap<usize, Waker>>> = Once::new();
21
22/// Global registry of parent task wakers for waitpid(-1) operations
23/// Each parent task has a waker that gets triggered when any of its children exit
24static PARENT_WAKERS: Once<Mutex<BTreeMap<usize, Waker>>> = Once::new();
25
26/// Initialize the task wakers registry
27fn init_task_wakers() -> Mutex<BTreeMap<usize, Waker>> {
28    Mutex::new(BTreeMap::new())
29}
30
31/// Initialize the parent waker registry
32fn init_parent_wakers() -> Mutex<BTreeMap<usize, Waker>> {
33    Mutex::new(BTreeMap::new())
34}
35
/// Get or create a waker for a specific task
/// 
/// This function returns a reference to the waker associated with the given task ID.
/// If no waker exists for the task, a new one is created.
/// 
/// # Arguments
/// 
/// * `task_id` - The ID of the task to get a waker for
/// 
/// # Returns
/// 
/// A reference to the waker for the specified task
///
/// NOTE(review): the `&'static` return is obtained by casting a pointer into a
/// `BTreeMap` value. This is unsound in general: `BTreeMap` may relocate values
/// when other entries are inserted/removed, and `cleanup_task_waker` removes the
/// entry outright — either invalidates previously returned references. Confirm
/// no caller holds the reference across registry mutations, or store the wakers
/// behind a stable indirection (e.g. `Box`/`Arc`) instead.
pub fn get_task_waker(task_id: usize) -> &'static Waker {
    // Lazily initialize the global registry on first use.
    let wakers_mutex = TASK_WAKERS.call_once(init_task_wakers);
    let mut wakers = wakers_mutex.lock();
    if !wakers.contains_key(&task_id) {
        let waker_name = alloc::format!("task_{}", task_id);
        // The waker API requires a 'static name, so the formatted string is
        // intentionally leaked (one small allocation per task id).
        let static_name = Box::leak(waker_name.into_boxed_str());
        wakers.insert(task_id, Waker::new_interruptible(static_name));
    }
    // SAFETY claim of the original author: the waker exists and won't be removed
    // until the task is cleaned up. See the NOTE(review) above — this invariant
    // is not enforced by the types.
    unsafe {
        let waker_ptr = wakers.get(&task_id).unwrap() as *const Waker;
        &*waker_ptr
    }
}
64
/// Get or create a parent waker for waitpid(-1) operations
/// 
/// This waker is used when a parent process calls waitpid(-1) to wait for any child.
/// It's separate from the task-specific wakers to avoid conflicts.
/// 
/// # Arguments
/// 
/// * `parent_id` - The ID of the parent task
/// 
/// # Returns
/// 
/// A reference to the parent waker
///
/// NOTE(review): same soundness hazard as `get_task_waker` — the `&'static`
/// reference points into a `BTreeMap` value, but map values may move on later
/// insertions/removals and `cleanup_parent_waker` removes the entry. Verify no
/// caller retains the reference across registry mutations.
pub fn get_parent_waker(parent_id: usize) -> &'static Waker {
    // Lazily initialize the global registry on first use.
    let wakers_mutex = PARENT_WAKERS.call_once(init_parent_wakers);
    let mut wakers = wakers_mutex.lock();
    
    // Create a new waker if it doesn't exist
    if !wakers.contains_key(&parent_id) {
        let waker_name = alloc::format!("parent_waker_{}", parent_id);
        // The waker API requires a 'static name, so the formatted string is
        // intentionally leaked (one small allocation per parent id).
        let static_name = alloc::boxed::Box::leak(waker_name.into_boxed_str());
        wakers.insert(parent_id, Waker::new_interruptible(static_name));
    }
    
    // Return a reference to the waker
    // Original author's claim: "the BTreeMap is never dropped and the Waker is
    // never moved" — the second part is not guaranteed by BTreeMap; see the
    // NOTE(review) above.
    unsafe {
        let waker_ptr = wakers.get(&parent_id).unwrap() as *const Waker;
        &*waker_ptr
    }
}
96
97/// Wake up any processes waiting for a specific task
98/// 
99/// This function should be called when a task exits to wake up
100/// any parent processes that are waiting for this specific task.
101/// 
102/// # Arguments
103/// 
104/// * `task_id` - The ID of the task that has exited
105pub fn wake_task_waiters(task_id: usize) {
106    let wakers_mutex = TASK_WAKERS.call_once(init_task_wakers);
107    let wakers = wakers_mutex.lock();
108    if let Some(waker) = wakers.get(&task_id) {
109        waker.wake_all();
110    }
111}
112
113/// Wake up a parent process waiting for any child (waitpid(-1))
114/// 
115/// This function should be called when any child of a parent exits.
116/// 
117/// # Arguments
118/// 
119/// * `parent_id` - The ID of the parent task
120pub fn wake_parent_waiters(parent_id: usize) {
121    let wakers_mutex = PARENT_WAKERS.call_once(init_parent_wakers);
122    let wakers = wakers_mutex.lock();
123    if let Some(waker) = wakers.get(&parent_id) {
124        waker.wake_all();
125    }
126}
127
128/// Clean up the waker for a specific task
129/// 
130/// This function should be called when a task is completely cleaned up
131/// to remove its waker from the global registry.
132/// 
133/// # Arguments
134/// 
135/// * `task_id` - The ID of the task to clean up
136pub fn cleanup_task_waker(task_id: usize) {
137    let wakers_mutex = TASK_WAKERS.call_once(init_task_wakers);
138    let mut wakers = wakers_mutex.lock();
139    wakers.remove(&task_id);
140}
141
142/// Clean up the parent waker for a specific task
143/// 
144/// This function should be called when a parent task is completely cleaned up.
145/// 
146/// # Arguments
147/// 
148/// * `parent_id` - The ID of the parent task to clean up
149pub fn cleanup_parent_waker(parent_id: usize) {
150    let wakers_mutex = PARENT_WAKERS.call_once(init_parent_wakers);
151    let mut wakers = wakers_mutex.lock();
152    wakers.remove(&parent_id);
153}
154
/// Types of blocked states for tasks.
///
/// `Eq` is derived alongside `PartialEq` (the equivalence is total),
/// which also permits use in `Eq`-requiring contexts.
#[derive(Debug, PartialEq, Eq, Clone, Copy)]
pub enum BlockedType {
    /// Interruptible blocking - can be interrupted by signals
    Interruptible,
    /// Uninterruptible blocking - cannot be interrupted, must wait for completion
    Uninterruptible,
}
163
/// Lifecycle state of a task as seen by the scheduler.
#[derive(Debug, PartialEq, Clone, Copy)]
pub enum TaskState {
    /// Created but `init()` has not been run yet.
    NotInitialized,
    /// Runnable and waiting to be scheduled.
    Ready,
    /// Currently executing on a CPU.
    Running,
    /// Waiting on an event; see `BlockedType` for interruptibility.
    Blocked(BlockedType),
    /// Exited but not yet reaped by its parent.
    Zombie,
    /// Fully terminated.
    Terminated,
}
173
/// Privilege class of a task: runs in kernel mode or user mode.
///
/// `Eq` is derived alongside `PartialEq` (the equivalence is total).
#[derive(Debug, PartialEq, Eq, Clone, Copy)]
pub enum TaskType {
    /// Kernel-mode task.
    Kernel,
    /// User-mode task.
    User,
}
179
/// A schedulable unit of execution.
///
/// A `Task` owns its virtual address space, the physical pages backing it,
/// its kernel-object handle table, and the parent/child bookkeeping used by
/// waitpid-style monitoring.
pub struct Task {
    /// Unique task ID, assigned from the global counter in `Task::new`.
    id: usize,
    /// Human-readable task name.
    pub name: String,
    /// Scheduling priority (interpretation is up to the scheduler).
    pub priority: u32,
    /// Architecture-specific virtual CPU state (registers, pc, sp).
    pub vcpu: Vcpu,
    /// Current lifecycle state.
    pub state: TaskState,
    /// Kernel-mode or user-mode task.
    pub task_type: TaskType,
    /// Entry point address.
    pub entry: usize,
    pub brk: Option<usize>, /* Program break (NOT work in Kernel task) */
    pub stack_size: usize, /* Size of the stack in bytes */
    pub data_size: usize, /* Size of the data segment in bytes (page unit) (NOT work in Kernel task) */
    pub text_size: usize, /* Size of the text segment in bytes (NOT work in Kernel task) */
    pub max_stack_size: usize, /* Maximum size of the stack in bytes */
    pub max_data_size: usize, /* Maximum size of the data segment in bytes */
    pub max_text_size: usize, /* Maximum size of the text segment in bytes */
    /// Per-task virtual memory manager (memory maps + root page table).
    pub vm_manager: VirtualMemoryManager,
    /// Managed pages
    /// 
    /// Managed pages are freed automatically when the task is terminated.
    pub managed_pages: Vec<ManagedPage>,
    parent_id: Option<usize>,      /* Parent task ID */
    children: Vec<usize>,          /* List of child task IDs */
    exit_status: Option<i32>,      /* Exit code (for monitoring child task termination) */

    /// Dynamic ABI (defaults to `ScarletAbi`; see `Task::new`)
    pub abi: Option<Box<dyn AbiModule>>,

    // Current working directory
    pub cwd: Option<String>,

    /// Virtual File System Manager
    /// 
    /// Each task can have its own isolated VfsManager instance for containerization
    /// and namespace isolation. The VfsManager provides:
    /// 
    /// - **Filesystem Isolation**: Independent mount point namespaces allowing
    ///   complete filesystem isolation between tasks or containers
    /// - **Selective Sharing**: Arc-based filesystem object sharing enables
    ///   controlled resource sharing while maintaining namespace independence
    /// - **Bind Mount Support**: Advanced bind mount capabilities for flexible
    ///   directory mapping and container orchestration scenarios
    /// - **Security**: Path normalization and validation preventing directory
    ///   traversal attacks and unauthorized filesystem access
    /// 
    /// # Usage Patterns
    /// 
    /// - `None`: Task uses global filesystem namespace (traditional Unix-like behavior)
    /// - `Some(Arc<VfsManager>)`: Task has isolated filesystem namespace (container-like behavior)
    /// 
    /// # Thread Safety
    /// 
    /// VfsManager is thread-safe and can be shared between tasks using Arc.
    /// All internal operations use RwLock for concurrent access protection.
    pub vfs: Option<Arc<VfsManager>>,

    // KernelObject table
    pub handle_table: HandleTable,
}
240
/// A physical page owned by a task, keyed by the virtual address it backs.
///
/// Pages registered as `ManagedPage`s are released automatically when the
/// task is terminated (see `free_pages`, which calls `free_boxed_page`).
#[derive(Debug, Clone)]
pub struct ManagedPage {
    /// Virtual address this page is mapped at.
    pub vaddr: usize,
    /// Owned physical page.
    pub page: Box<Page>,
}
246
/// Bit definitions for [`CloneFlags`].
///
/// Each variant selects a resource that is shared (rather than copied)
/// between parent and child when cloning a task. Derives were added so a
/// flag value can be reused, compared, and debug-printed (the original
/// had no derives, so every use moved the value).
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum CloneFlagsDef {
    Vm      = 0b00000001, // Clone the VM
    Fs      = 0b00000010, // Clone the filesystem
    Files   = 0b00000100, // Clone the file descriptors
}

/// A bitmask of [`CloneFlagsDef`] values controlling task cloning.
#[derive(Debug, Clone, Copy)]
pub struct CloneFlags {
    raw: u64,
}

impl CloneFlags {
    /// Create an empty flag set (no flags set).
    pub fn new() -> Self {
        CloneFlags { raw: 0 }
    }

    /// Build a flag set from a raw bitmask.
    pub fn from_raw(raw: u64) -> Self {
        CloneFlags { raw }
    }

    /// Set the given flag bit.
    pub fn set(&mut self, flag: CloneFlagsDef) {
        self.raw |= flag as u64;
    }

    /// Clear the given flag bit.
    pub fn clear(&mut self, flag: CloneFlagsDef) {
        self.raw &= !(flag as u64);
    }

    /// Check whether the given flag bit is set.
    pub fn is_set(&self, flag: CloneFlagsDef) -> bool {
        (self.raw & (flag as u64)) != 0
    }

    /// Return the raw bitmask.
    pub fn get_raw(&self) -> u64 {
        self.raw
    }
}

impl Default for CloneFlags {
    /// Default clone behavior: share the filesystem and file descriptors,
    /// but give the child its own copy of the address space (Vm unset).
    fn default() -> Self {
        let raw = CloneFlagsDef::Fs as u64 | CloneFlagsDef::Files as u64;
        CloneFlags { raw }
    }
}
290
291static TASK_ID: Mutex<usize> = Mutex::new(1);
292
293impl Task {
294    pub fn new(name: String, priority: u32, task_type: TaskType) -> Self {
295        let mut taskid = TASK_ID.lock();
296        let task = Task {
297            id: *taskid,
298            name,
299            priority,
300            vcpu: Vcpu::new(match task_type {
301                TaskType::Kernel => crate::arch::vcpu::Mode::Kernel,
302                TaskType::User => crate::arch::vcpu::Mode::User,
303            }),
304            state: TaskState::NotInitialized,
305            task_type,
306            entry: 0,
307            brk: None,
308            stack_size: 0,
309            data_size: 0,
310            text_size: 0,
311            max_stack_size: DEAFAULT_MAX_TASK_STACK_SIZE,
312            max_data_size: DEAFAULT_MAX_TASK_DATA_SIZE,
313            max_text_size: DEAFAULT_MAX_TASK_TEXT_SIZE,
314            vm_manager: VirtualMemoryManager::new(),
315            managed_pages: Vec::new(),
316            parent_id: None,
317            children: Vec::new(),
318            exit_status: None,
319            abi: Some(Box::new(ScarletAbi::default())), // Default ABI
320            cwd: None,
321            vfs: None,
322            handle_table: HandleTable::new(),
323        };
324
325        *taskid += 1;
326        task
327    }
328    
329    pub fn init(&mut self) {
330        match self.task_type {
331            TaskType::Kernel => {
332                user_kernel_vm_init(self);
333                /* Set sp to the top of the kernel stack */
334                self.vcpu.set_sp(KERNEL_VM_STACK_END + 1);
335
336            },
337            TaskType::User => { 
338                user_vm_init(self);
339                /* Set sp to the top of the user stack */
340                self.vcpu.set_sp(0xffff_ffff_ffff_f000);
341            }
342        }
343        
344        /* Set the task state to Ready */
345        self.state = TaskState::Ready;
346    }
347
    /// Get the unique ID of this task.
    pub fn get_id(&self) -> usize {
        self.id
    }
351
    /// Set the task state.
    /// 
    /// # Arguments
    /// * `state` - The new task state
    pub fn set_state(&mut self, state: TaskState) {
        self.state = state;
    }
360
    /// Get the current task state.
    /// 
    /// # Returns
    /// The task state
    pub fn get_state(&self) -> TaskState {
        self.state
    }
369
    /// Get the total memory footprint of the task.
    /// 
    /// # Returns
    /// The sum of the stack, text, and data segment sizes, in bytes.
    pub fn get_size(&self) -> usize {
        self.stack_size + self.text_size + self.data_size
    }
377
378    /// Get the program break (NOT work in Kernel task)
379    /// 
380    /// # Returns
381    /// The program break address
382    pub fn get_brk(&self) -> usize {
383        if self.brk.is_none() {
384            return self.text_size + self.data_size;
385        }
386        self.brk.unwrap()
387    }
388
    /// Set the program break (NOT work in Kernel task)
    /// 
    /// Shrinking the break frees the data pages above the new break;
    /// growing it allocates new data pages for the gap.
    /// 
    /// # Arguments
    /// * `brk` - The new program break address
    /// 
    /// # Returns
    /// If successful, returns Ok(()), otherwise returns an error.
    pub fn set_brk(&mut self, brk: usize) -> Result<(), &'static str> {
        // println!("New brk: {:#x}", brk);
        // The break may never drop below the end of the text segment.
        if brk < self.text_size {
            return Err("Invalid address");
        }
        let prev_brk = self.get_brk();
        if brk < prev_brk {
            /* Free pages */
            /* Round address to the page boundary */
            // Both bounds are rounded UP to the next page; only the whole
            // pages strictly between the rounded bounds are released.
            let prev_addr = (prev_brk + PAGE_SIZE - 1) & !(PAGE_SIZE - 1);
            let addr = (brk + PAGE_SIZE - 1) & !(PAGE_SIZE - 1);
            let num_of_pages = (prev_addr - addr) / PAGE_SIZE;
            self.free_data_pages(addr, num_of_pages);            
        } else if brk > prev_brk {
            /* Allocate pages */
            /* Round address to the page boundary */
            let prev_addr = (prev_brk + PAGE_SIZE - 1) & !(PAGE_SIZE - 1);
            let addr = (brk + PAGE_SIZE - 1) & !(PAGE_SIZE - 1);
            let num_of_pages = (addr - prev_addr) / PAGE_SIZE;

            if num_of_pages > 0 {
                // Only allocate when no mapping already covers the start of
                // the new range (e.g. a partially-used final page).
                match self.vm_manager.search_memory_map(prev_addr) {
                    Some(_) => {},
                    None => {
                        match self.allocate_data_pages(prev_addr, num_of_pages) {
                            Ok(_) => {},
                            Err(_) => return Err("Failed to allocate pages"),
                        }
                    },
                }
            }
        }
        self.brk = Some(brk);
        Ok(())
    }
431
432    /// Allocate pages for the task.
433    /// 
434    /// # Arguments
435    /// * `vaddr` - The virtual address to allocate pages (NOTE: The address must be page aligned)
436    /// * `num_of_pages` - The number of pages to allocate
437    /// * `segment` - The segment type to allocate pages
438    /// 
439    /// # Returns
440    /// The memory map of the allocated pages, if successful.
441    /// 
442    /// # Errors
443    /// If the address is not page aligned, or if the pages cannot be allocated.
444    /// 
445    /// # Note
446    /// This function don't increment the size of the task.
447    /// You must increment the size of the task manually.
448    /// 
449    pub fn allocate_pages(&mut self, vaddr: usize, num_of_pages: usize, permissions: usize) -> Result<VirtualMemoryMap, &'static str> {
450
451        if vaddr % PAGE_SIZE != 0 {
452            return Err("Address is not page aligned");
453        }
454        
455        let pages = allocate_raw_pages(num_of_pages);
456        let size = num_of_pages * PAGE_SIZE;
457        let paddr = pages as usize;
458        let mmap = VirtualMemoryMap {
459            pmarea: MemoryArea {
460                start: paddr,
461                end: paddr + size - 1,
462            },
463            vmarea: MemoryArea {
464                start: vaddr,
465                end: vaddr + size - 1,
466            },
467            permissions,
468            is_shared: false, // Default to not shared for task-allocated pages
469        };
470        self.vm_manager.add_memory_map(mmap).map_err(|e| panic!("Failed to add memory map: {}", e))?;
471
472        for i in 0..num_of_pages {
473            let page = unsafe { Box::from_raw(pages.wrapping_add(i)) };
474            let vaddr = mmap.vmarea.start + i * PAGE_SIZE;
475            self.add_managed_page(ManagedPage {
476                vaddr,
477                page
478            });
479        }
480
481
482        Ok(mmap)
483    }
484
    /// Free pages for the task.
    /// 
    /// Removes each page's memory map entry, re-adding the non-freed head/tail
    /// portions of any map that only partially overlaps the freed range, frees
    /// the backing managed page, and finally unmaps the range from the page table.
    /// 
    /// # Arguments
    /// * `vaddr` - The virtual address to free pages (NOTE: The address must be page aligned)
    /// * `num_of_pages` - The number of pages to free
    pub fn free_pages(&mut self, vaddr: usize, num_of_pages: usize) {
        // Index of the first page (vaddr is assumed page aligned).
        let page = vaddr / PAGE_SIZE;
        for p in 0..num_of_pages {
            // Shadowed: the address of the page being freed this iteration.
            let vaddr = (page + p) * PAGE_SIZE;
            match self.vm_manager.search_memory_map_idx(vaddr) {
                Some(idx) => {
                    let mmap = self.vm_manager.remove_memory_map(idx).unwrap();
                    if p == 0 && mmap.vmarea.start < vaddr {
                        /* Re add the first part of the memory map */
                        // The map begins before the freed range: keep its head.
                        let size = vaddr - mmap.vmarea.start;
                        let paddr = mmap.pmarea.start;
                        let mmap1 = VirtualMemoryMap {
                            pmarea: MemoryArea {
                                start: paddr,
                                end: paddr + size - 1,
                            },
                            vmarea: MemoryArea {
                                start: mmap.vmarea.start,
                                end: vaddr - 1,
                            },
                            permissions: mmap.permissions,
                            is_shared: mmap.is_shared,
                        };
                        self.vm_manager.add_memory_map(mmap1)
                            .map_err(|e| panic!("Failed to add memory map: {}", e)).unwrap();
                        // println!("Removed map : {:#x} - {:#x}", mmap.vmarea.start, mmap.vmarea.end);
                        // println!("Re added map: {:#x} - {:#x}", mmap1.vmarea.start, mmap1.vmarea.end);
                    }
                    if p == num_of_pages - 1 && mmap.vmarea.end > vaddr + PAGE_SIZE - 1 {
                        /* Re add the second part of the memory map */
                        // The map extends past the freed range: keep its tail.
                        let size = mmap.vmarea.end - (vaddr + PAGE_SIZE) + 1;
                        let paddr = mmap.pmarea.start + (vaddr + PAGE_SIZE - mmap.vmarea.start);
                        let mmap2 = VirtualMemoryMap {
                            pmarea: MemoryArea {
                                start: paddr,
                                end: paddr + size - 1,
                            },
                            vmarea: MemoryArea {
                                start: vaddr + PAGE_SIZE,
                                end: mmap.vmarea.end,
                            },
                            permissions: mmap.permissions,
                            is_shared: mmap.is_shared,
                        };
                        self.vm_manager.add_memory_map(mmap2)
                            .map_err(|e| panic!("Failed to add memory map: {}", e)).unwrap();
                        // println!("Removed map : {:#x} - {:#x}", mmap.vmarea.start, mmap.vmarea.end);
                        // println!("Re added map: {:#x} - {:#x}", mmap2.vmarea.start, mmap2.vmarea.end);
                    }
                    // let offset = vaddr - mmap.vmarea.start;
                    // free_raw_pages((mmap.pmarea.start + offset) as *mut Page, 1);

                    // Release the physical page if this task owns it; pages not
                    // in managed_pages (e.g. shared mappings) are left alone.
                    if let Some(free_page) = self.remove_managed_page(vaddr) {
                        free_boxed_page(free_page);
                    }
                    
                    // println!("Freed pages : {:#x} - {:#x}", vaddr, vaddr + PAGE_SIZE - 1);
                },
                None => {},
            }
        }
        /* Unmap pages */
        // Drop the translations from the page table in a second pass.
        let root_pagetable = self.vm_manager.get_root_page_table().unwrap();
        for p in 0..num_of_pages {
            let vaddr = (page + p) * PAGE_SIZE;
            root_pagetable.unmap(vaddr);
        }
    }
558
559    /// Allocate text pages for the task. And increment the size of the task.
560    ///
561    /// # Arguments
562    /// * `vaddr` - The virtual address to allocate pages (NOTE: The address must be page aligned)
563    /// * `num_of_pages` - The number of pages to allocate
564    /// 
565    /// # Returns
566    /// The memory map of the allocated pages, if successful.
567    /// 
568    /// # Errors
569    /// If the address is not page aligned, or if the pages cannot be allocated.
570    /// 
571    pub fn allocate_text_pages(&mut self, vaddr: usize, num_of_pages: usize) -> Result<VirtualMemoryMap, &'static str> {
572        let permissions = VirtualMemoryRegion::Text.default_permissions();
573        let res = self.allocate_pages(vaddr, num_of_pages, permissions);   
574        if res.is_ok() {
575            self.text_size += num_of_pages * PAGE_SIZE;
576        }
577        res
578    }
579
    /// Free text pages for the task. And decrement the size of the task.
    /// 
    /// # Arguments
    /// * `vaddr` - The virtual address to free pages (NOTE: The address must be page aligned)
    /// * `num_of_pages` - The number of pages to free
    /// 
    /// NOTE(review): `text_size` underflows (panics in debug builds) if more
    /// pages are freed than were accounted — confirm callers keep these balanced.
    pub fn free_text_pages(&mut self, vaddr: usize, num_of_pages: usize) {
        self.free_pages(vaddr, num_of_pages);
        self.text_size -= num_of_pages * PAGE_SIZE;
    }
590
591    /// Allocate stack pages for the task. And increment the size of the task.
592    ///
593    /// # Arguments
594    /// * `vaddr` - The virtual address to allocate pages (NOTE: The address must be page aligned)
595    /// * `num_of_pages` - The number of pages to allocate
596    /// 
597    /// # Returns
598    /// The memory map of the allocated pages, if successful.
599    /// 
600    /// # Errors
601    /// If the address is not page aligned, or if the pages cannot be allocated.
602    /// 
603    pub fn allocate_stack_pages(&mut self, vaddr: usize, num_of_pages: usize) -> Result<VirtualMemoryMap, &'static str> {
604        let permissions = VirtualMemoryRegion::Stack.default_permissions();
605        let res = self.allocate_pages(vaddr, num_of_pages, permissions)?;
606        self.stack_size += num_of_pages * PAGE_SIZE;
607        Ok(res)
608    }
609
    /// Free stack pages for the task. And decrement the size of the task.
    /// 
    /// # Arguments
    /// * `vaddr` - The virtual address to free pages (NOTE: The address must be page aligned)
    /// * `num_of_pages` - The number of pages to free
    /// 
    /// NOTE(review): `stack_size` underflows (panics in debug builds) if more
    /// pages are freed than were accounted — confirm callers keep these balanced.
    pub fn free_stack_pages(&mut self, vaddr: usize, num_of_pages: usize) {
        self.free_pages(vaddr, num_of_pages);
        self.stack_size -= num_of_pages * PAGE_SIZE;
    }
620
621    /// Allocate data pages for the task. And increment the size of the task.
622    ///
623    /// # Arguments
624    /// * `vaddr` - The virtual address to allocate pages (NOTE: The address must be page aligned)
625    /// * `num_of_pages` - The number of pages to allocate
626    /// 
627    /// # Returns
628    /// The memory map of the allocated pages, if successful.
629    /// 
630    /// # Errors
631    /// If the address is not page aligned, or if the pages cannot be allocated.
632    /// 
633    pub fn allocate_data_pages(&mut self, vaddr: usize, num_of_pages: usize) -> Result<VirtualMemoryMap, &'static str> {
634        let permissions = VirtualMemoryRegion::Data.default_permissions();
635        let res = self.allocate_pages(vaddr, num_of_pages, permissions)?;
636        self.data_size += num_of_pages * PAGE_SIZE;
637        Ok(res)
638    }
639
    /// Free data pages for the task. And decrement the size of the task.
    /// 
    /// # Arguments
    /// * `vaddr` - The virtual address to free pages (NOTE: The address must be page aligned)
    /// * `num_of_pages` - The number of pages to free
    /// 
    /// NOTE(review): `data_size` underflows (panics in debug builds) if more
    /// pages are freed than were accounted — confirm callers keep these balanced.
    pub fn free_data_pages(&mut self, vaddr: usize, num_of_pages: usize) {
        self.free_pages(vaddr, num_of_pages);
        self.data_size -= num_of_pages * PAGE_SIZE;
    }
650
651    /// Allocate guard pages for the task.
652    /// 
653    /// # Arguments
654    /// * `vaddr` - The virtual address to allocate pages (NOTE: The address must be page aligned)
655    /// * `num_of_pages` - The number of pages to allocate
656    /// 
657    /// # Returns
658    /// The memory map of the allocated pages, if successful.
659    /// 
660    /// # Errors
661    /// If the address is not page aligned, or if the pages cannot be allocated.
662    /// 
663    /// # Note
664    /// Gurad pages are not allocated in the physical memory space.
665    /// This function only maps the pages to the virtual memory space.
666    /// 
667    pub fn allocate_guard_pages(&mut self, vaddr: usize, num_of_pages: usize) -> Result<VirtualMemoryMap, &'static str> {
668        let permissions = VirtualMemoryRegion::Guard.default_permissions();
669        let mmap = VirtualMemoryMap {
670            pmarea: MemoryArea {
671                start: 0,
672                end: 0,
673            },
674            vmarea: MemoryArea {
675                start: vaddr,
676                end: vaddr + num_of_pages * PAGE_SIZE - 1,
677            },
678            permissions,
679            is_shared: VirtualMemoryRegion::Guard.is_shareable(), // Guard pages can be shared
680        };
681        Ok(mmap)
682    }
683
    /// Add pages to the task
    /// 
    /// # Arguments
    /// * `pages` - The managed page to add
    /// 
    /// # Note
    /// Pages added as ManagedPage of the Task will be automatically freed when the Task is terminated.
    /// So, you must not free them by calling free_raw_pages/free_boxed_pages manually.
    /// 
    pub fn add_managed_page(&mut self, pages: ManagedPage) {
        self.managed_pages.push(pages);
    }
696
697    /// Get managed page
698    /// 
699    /// # Arguments
700    /// * `vaddr` - The virtual address of the page
701    /// 
702    /// # Returns
703    /// The managed page if found, otherwise None
704    /// 
705    fn get_managed_page(&self, vaddr: usize) -> Option<&ManagedPage> {
706        for page in &self.managed_pages {
707            if page.vaddr == vaddr {
708                return Some(page);
709            }
710        }
711        None
712    }
713
714    /// Remove managed page
715    /// 
716    /// # Arguments
717    /// * `vaddr` - The virtual address of the page
718    /// 
719    /// # Returns
720    /// The removed managed page if found, otherwise None
721    /// 
722    fn remove_managed_page(&mut self, vaddr: usize) -> Option<Box<Page>> {
723        for i in 0..self.managed_pages.len() {
724            if self.managed_pages[i].vaddr == vaddr {
725                let page = self.managed_pages.remove(i);
726                return Some(page.page);
727            }
728        }
729        None
730    }
731
732
    /// Set the entry point by writing the program counter on the vcpu.
    ///
    /// NOTE(review): this updates only `vcpu.pc`; the `self.entry` field is
    /// left untouched — confirm whether callers expect it to stay in sync.
    pub fn set_entry_point(&mut self, entry: usize) {
        self.vcpu.set_pc(entry as u64);
    }
737
    /// Get the parent ID
    ///
    /// # Returns
    /// The parent task ID, or None if there is no parent
    pub fn get_parent_id(&self) -> Option<usize> {
        self.parent_id
    }
745    
    /// Set the parent task
    ///
    /// # Arguments
    /// * `parent_id` - The ID of the parent task
    pub fn set_parent_id(&mut self, parent_id: usize) {
        self.parent_id = Some(parent_id);
    }
753    
754    /// Add a child task
755    ///
756    /// # Arguments
757    /// * `child_id` - The ID of the child task
758    pub fn add_child(&mut self, child_id: usize) {
759        if !self.children.contains(&child_id) {
760            self.children.push(child_id);
761        }
762    }
763    
764    /// Remove a child task
765    ///
766    /// # Arguments
767    /// * `child_id` - The ID of the child task to remove
768    ///
769    /// # Returns
770    /// true if the removal was successful, false if the child task was not found
771    pub fn remove_child(&mut self, child_id: usize) -> bool {
772        if let Some(pos) = self.children.iter().position(|&id| id == child_id) {
773            self.children.remove(pos);
774            true
775        } else {
776            false
777        }
778    }
779    
    /// Get the list of child tasks
    ///
    /// # Returns
    /// A vector of child task IDs
    ///
    /// NOTE(review): returning `&[usize]` would be the more idiomatic borrow,
    /// but that would change the public signature — left as-is.
    pub fn get_children(&self) -> &Vec<usize> {
        &self.children
    }
787    
    /// Set the exit status
    ///
    /// # Arguments
    /// * `status` - The exit status
    pub fn set_exit_status(&mut self, status: i32) {
        self.exit_status = Some(status);
    }
795    
    /// Get the exit status
    ///
    /// # Returns
    /// The exit status, or None if the task has not exited yet
    pub fn get_exit_status(&self) -> Option<i32> {
        self.exit_status
    }
803
804    /// Get the file descriptor table
805    /// 
806    /// # Returns
807    /// A reference to the file descriptor table
808    /// 
809    /// Clone this task, creating a near-identical copy
810    /// 
811    /// # Arguments
812    /// 
813    /// # Returns
814    /// The cloned task
815    /// 
816    /// # Errors 
817    /// If the task cannot be cloned, an error is returned.
818    ///
819    pub fn clone_task(&mut self, flags: CloneFlags) -> Result<Task, &'static str> {
820        // Create a new task (but don't call init() yet)
821        let mut child = Task::new(
822            self.name.clone(),
823            self.priority,
824            self.task_type
825        );
826        
827        // First, set up the virtual memory manager with the same ASID allocation
828        match self.task_type {
829            TaskType::Kernel => {
830                // For kernel tasks, we need to call init to set up the kernel VM
831                child.init();
832            },
833            TaskType::User => {
834                if !flags.is_set(CloneFlagsDef::Vm) {
835                    // For user tasks, manually set up VM without calling init()
836                    // to avoid creating new stack that would overwrite parent's stack content
837                    let asid = alloc_virtual_address_space();
838                    child.vm_manager.set_asid(asid);
839                }
840            }
841        }
842        
843        if !flags.is_set(CloneFlagsDef::Vm) {
844            // Copy or share memory maps from parent to child
845            for mmap in self.vm_manager.get_memmap() {
846                let num_pages = (mmap.vmarea.end - mmap.vmarea.start + 1 + PAGE_SIZE - 1) / PAGE_SIZE;
847                let vaddr = mmap.vmarea.start;
848                
849                if num_pages > 0 {
850                    if mmap.is_shared {
851                        // Shared memory regions: just reference the same physical pages
852                        let shared_mmap = VirtualMemoryMap {
853                            pmarea: mmap.pmarea, // Same physical memory
854                            vmarea: mmap.vmarea, // Same virtual addresses
855                            permissions: mmap.permissions,
856                            is_shared: true,
857                        };
858                        // Add the shared memory map directly to the child task
859                        child.vm_manager.add_memory_map(shared_mmap)
860                            .map_err(|_| "Failed to add shared memory map to child task")?;
861
862                        // TODO: Add logic to determine if the memory map is a trampoline
863                        // If the memory map is the trampoline, pre-map it
864                        if mmap.vmarea.start == 0xffff_ffff_ffff_f000 {
865                            // Pre-map the trampoline page
866                            let root_pagetable = child.vm_manager.get_root_page_table().unwrap();
867                            root_pagetable.map_memory_area(child.vm_manager.get_asid(), shared_mmap)?;
868                        }
869
870                    } else {
871                        // Private memory regions: allocate new pages and copy contents
872                        let permissions = mmap.permissions;
873                        let pages = allocate_raw_pages(num_pages);
874                        let size = num_pages * PAGE_SIZE;
875                        let paddr = pages as usize;
876                        let new_mmap = VirtualMemoryMap {
877                            pmarea: MemoryArea {
878                                start: paddr,
879                                end: paddr + (size - 1),
880                            },
881                            vmarea: MemoryArea {
882                                start: vaddr,
883                                end: vaddr + (size - 1),
884                            },
885                            permissions,
886                            is_shared: false,
887                        };
888                        
889                        // Copy the contents of the original memory (including stack contents)
890                        for i in 0..num_pages {
891                            let src_page_addr = mmap.pmarea.start + i * PAGE_SIZE;
892                            let dst_page_addr = new_mmap.pmarea.start + i * PAGE_SIZE;
893                            unsafe {
894                                core::ptr::copy_nonoverlapping(
895                                    src_page_addr as *const u8,
896                                    dst_page_addr as *mut u8,
897                                    PAGE_SIZE
898                                );
899                            }
900                            // Manage the new pages in the child task
901                            child.add_managed_page(ManagedPage {
902                                vaddr: new_mmap.vmarea.start + i * PAGE_SIZE,
903                                page: unsafe { Box::from_raw(pages.wrapping_add(i)) },
904                            });
905                        }
906                        // Add the new memory map to the child task
907                        child.vm_manager.add_memory_map(new_mmap)
908                            .map_err(|_| "Failed to add memory map to child task")?;
909                    }
910                }
911            }
912        }
913
914        // Copy register states
915        child.vcpu.regs = self.vcpu.regs.clone();
916        
917        // Set the ABI
918        if let Some(abi) = &self.abi {
919            child.abi = Some(abi.clone_boxed());
920        } else {
921            child.abi = None; // No ABI set
922        }
923        
924        // Copy state such as data size
925        child.stack_size = self.stack_size;
926        child.data_size = self.data_size;
927        child.text_size = self.text_size;
928        child.max_stack_size = self.max_stack_size;
929        child.max_data_size = self.max_data_size;
930        child.max_text_size = self.max_text_size;
931        
932        // Set the same entry point and PC
933        child.entry = self.entry;
934        child.vcpu.set_pc(self.vcpu.get_pc());
935
936        if flags.is_set(CloneFlagsDef::Files) {
937            // Clone the file descriptor table
938            child.handle_table = self.handle_table.clone();
939        }
940        
941        if flags.is_set(CloneFlagsDef::Fs) {
942            // Clone the filesystem manager
943            if let Some(vfs) = &self.vfs {
944                child.vfs = Some(vfs.clone());
945                // Copy the current working directory
946                child.cwd = self.cwd.clone();
947            } else {
948                child.vfs = None;
949                child.cwd = None; // No filesystem manager, no current working directory
950            }
951        }
952
953        // Set the ABI
954        if let Some(abi) = &self.abi {
955            child.abi = Some(abi.clone_boxed());
956        } else {
957            child.abi = None; // No ABI set
958        }
959
960        // Set the state to Ready
961        child.state = self.state;
962
963        // Set parent-child relationship
964        child.set_parent_id(self.id);
965        self.add_child(child.get_id());
966
967        Ok(child)
968    }
969
970    /// Exit the task
971    /// 
972    /// # Arguments
973    /// * `status` - The exit status
974    /// 
975    pub fn exit(&mut self, status: i32) {
976        // crate::println!("Task {} ({}) exiting with status {}", self.id, self.name, status);
977        
978        // Close all open handles when task exits
979        self.handle_table.close_all();
980        
981        match self.parent_id {
982            Some(parent_id) => {
983                if get_scheduler().get_task_by_id(parent_id).is_none() {
984                    // crate::println!("Task {}: Parent {} not found, terminating", self.id, parent_id);
985                    self.state = TaskState::Terminated;
986                    return;
987                }
988                /* Set the exit status */
989                self.set_exit_status(status);
990                self.state = TaskState::Zombie;
991                // crate::println!("Task {}: Set to Zombie state, parent {}", self.id, parent_id);
992            },
993            None => {
994                /* If the task has no parent, it is terminated */
995                // crate::println!("Task {}: No parent, terminating", self.id);
996                self.state = TaskState::Terminated;
997            }
998        }
999    }
1000
1001    /// Wait for a child task to exit and collect its status
1002    /// 
1003    /// # Arguments
1004    /// * `child_id` - The ID of the child task to wait for
1005    /// 
1006    /// # Returns
1007    /// The exit status of the child task, or an error if the child is not found or not in Zombie state
1008    pub fn wait(&mut self, child_id: usize) -> Result<i32, WaitError> {
1009        if !self.children.contains(&child_id) {
1010            return Err(WaitError::NoSuchChild("No such child task".to_string()));
1011        }
1012
1013        if let Some(child_task) = get_scheduler().get_task_by_id(child_id) {
1014            if child_task.get_state() == TaskState::Zombie {
1015                let status = child_task.get_exit_status().unwrap_or(-1);
1016                child_task.set_state(TaskState::Terminated);
1017                self.remove_child(child_id);
1018                Ok(status)
1019            } else {
1020                Err(WaitError::ChildNotExited("Child has not exited or is not a zombie".to_string()))
1021            }
1022        } else {
1023            Err(WaitError::ChildTaskNotFound("Child task not found".to_string()))
1024        }
1025    }
1026
    // VFS Helper Methods
    
    /// Set the VFS manager
    /// 
    /// Replaces any previously set VFS manager for this task.
    /// 
    /// # Arguments
    /// * `vfs` - The VfsManager to set as the VFS
    pub fn set_vfs(&mut self, vfs: Arc<VfsManager>) {
        self.vfs = Some(vfs);
    }
1036    
    /// Get a reference to the VFS
    /// 
    /// # Returns
    /// `Some` with the task's VFS manager, or `None` if one was never set
    pub fn get_vfs(&self) -> Option<&Arc<VfsManager>> {
        self.vfs.as_ref()
    }
1041
    /// Set the current working directory
    /// 
    /// # Arguments
    /// * `cwd` - Path to store as the working directory. Not validated here —
    ///   callers are expected to pass a path that is valid for the task's VFS.
    pub fn set_cwd(&mut self, cwd: String) {
        self.cwd = Some(cwd);
    }
1046
    /// Get the current working directory
    /// 
    /// # Returns
    /// `Some` with the stored path, or `None` if no cwd has been set
    pub fn get_cwd(&self) -> Option<&String> {
        self.cwd.as_ref()
    }
1051}
1052
/// Errors returned by [`Task::wait`].
///
/// Each variant carries a human-readable description of the failure.
#[derive(Debug)]
pub enum WaitError {
    NoSuchChild(String),
    ChildNotExited(String),
    ChildTaskNotFound(String),
}

impl WaitError {
    /// Borrow the message carried by this error, regardless of variant.
    pub fn message(&self) -> &str {
        // All variants hold a String payload, so a single or-pattern arm
        // covers the whole enum.
        match self {
            WaitError::NoSuchChild(msg)
            | WaitError::ChildNotExited(msg)
            | WaitError::ChildTaskNotFound(msg) => msg,
        }
    }
}
1069
1070/// Create a new kernel task.
1071/// 
1072/// # Arguments
1073/// * `name` - The name of the task
1074/// * `priority` - The priority of the task
1075/// * `func` - The function to run in the task
1076/// 
1077/// # Returns
1078/// The new task.
1079pub fn new_kernel_task(name: String, priority: u32, func: fn()) -> Task {
1080    let mut task = Task::new(name, priority, TaskType::Kernel);
1081    task.entry = func as usize;
1082    task
1083}
1084
/// Create a new user task.
/// 
/// Unlike `new_kernel_task`, no entry point is set here; it is presumably
/// filled in later (e.g. when a program image is loaded) — see the callers.
/// 
/// # Arguments
/// * `name` - The name of the task
/// * `priority` - The priority of the task
/// 
/// # Returns
/// The new task.
pub fn new_user_task(name: String, priority: u32) -> Task {
    Task::new(name, priority, TaskType::User)
}
1096
1097/// Get the current task.
1098/// 
1099/// # Returns
1100/// The current task if it exists.
1101pub fn mytask() -> Option<&'static mut Task> {
1102    let cpu = get_cpu();
1103    get_scheduler().get_current_task(cpu.get_cpuid())
1104}
1105
1106/// Set the current working directory for the current task
1107/// 
1108/// # Arguments
1109/// * `cwd` - New current working directory path
1110/// 
1111/// # Returns
1112/// * `true` if successful, `false` if no current task
1113pub fn set_current_task_cwd(cwd: String) -> bool {
1114    if let Some(task) = mytask() {
1115        task.set_cwd(cwd);
1116        true
1117    } else {
1118        false
1119    }
1120}
1121
1122#[cfg(test)]
1123mod tests {
1124    use alloc::string::ToString;
1125
1126    use crate::task::CloneFlags;
1127
    #[test_case]
    fn test_set_brk() {
        // A fresh user task starts with brk at 0; brk can then be moved both
        // up and down, including to page-unaligned addresses (0x1008).
        let mut task = super::new_user_task("Task0".to_string(), 0);
        task.init();
        assert_eq!(task.get_brk(), 0);
        task.set_brk(0x1000).unwrap();
        assert_eq!(task.get_brk(), 0x1000);
        task.set_brk(0x2000).unwrap();
        assert_eq!(task.get_brk(), 0x2000);
        task.set_brk(0x1008).unwrap();
        assert_eq!(task.get_brk(), 0x1008);
        task.set_brk(0x1000).unwrap();
        assert_eq!(task.get_brk(), 0x1000);
    }
1142
    #[test_case]
    fn test_task_parent_child_relationship() {
        // Exercises the manual parent/child bookkeeping API:
        // set_parent_id/add_child link the tasks, remove_child unlinks them.
        let mut parent_task = super::new_user_task("ParentTask".to_string(), 0);
        parent_task.init();

        let mut child_task = super::new_user_task("ChildTask".to_string(), 0);
        child_task.init();

        // Set parent-child relationship
        child_task.set_parent_id(parent_task.get_id());
        parent_task.add_child(child_task.get_id());

        // Verify parent-child relationship
        assert_eq!(child_task.get_parent_id(), Some(parent_task.get_id()));
        assert!(parent_task.get_children().contains(&child_task.get_id()));

        // Remove child and verify
        assert!(parent_task.remove_child(child_task.get_id()));
        assert!(!parent_task.get_children().contains(&child_task.get_id()));
    }
1163
    #[test_case]
    fn test_task_exit_status() {
        // The exit status starts unset (None) and reflects the most recent
        // set_exit_status() call thereafter.
        let mut task = super::new_user_task("TaskWithExitStatus".to_string(), 0);
        task.init();

        // Verify initial exit status is None
        assert_eq!(task.get_exit_status(), None);

        // Set and verify exit status
        task.set_exit_status(0);
        assert_eq!(task.get_exit_status(), Some(0));

        task.set_exit_status(1);
        assert_eq!(task.get_exit_status(), Some(1));
    }
1179
    #[test_case]
    fn test_clone_task_memory_copy() {
        // End-to-end check that clone_task() deep-copies private memory:
        // the child gets its own physical pages with identical contents,
        // and later writes to the parent do not leak into the child.
        let mut parent_task = super::new_user_task("ParentTask".to_string(), 0);
        parent_task.init();

        // Allocate some memory pages for the parent task
        let vaddr = 0x1000;
        let num_pages = 2;
        let mmap = parent_task.allocate_data_pages(vaddr, num_pages).unwrap();

        // Write test data to parent's memory
        let test_data: [u8; 8] = [0x12, 0x34, 0x56, 0x78, 0x9A, 0xBC, 0xDE, 0xF0];
        // SAFETY: pmarea.start points at the pages just allocated above —
        // assumes kernel tests can dereference physical addresses directly.
        unsafe {
            let dst_ptr = mmap.pmarea.start as *mut u8;
            core::ptr::copy_nonoverlapping(test_data.as_ptr(), dst_ptr, test_data.len());
        }

        // Get parent memory map count before cloning
        let parent_memmap_count = parent_task.vm_manager.get_memmap().len();
        let parent_id = parent_task.get_id();

        // Clone the parent task
        let child_task = parent_task.clone_task(CloneFlags::default()).unwrap();

        // Get child memory map count after cloning
        let child_memmap_count = child_task.vm_manager.get_memmap().len();

        // Verify that the number of memory maps are identical
        assert_eq!(child_memmap_count, parent_memmap_count, 
            "Child should have the same number of memory maps as parent: child={}, parent={}",
            child_memmap_count, parent_memmap_count);

        // Verify parent-child relationship was established
        assert_eq!(child_task.get_parent_id(), Some(parent_id));
        assert!(parent_task.get_children().contains(&child_task.get_id()));

        // Verify memory sizes were copied
        assert_eq!(child_task.stack_size, parent_task.stack_size);
        assert_eq!(child_task.data_size, parent_task.data_size);
        assert_eq!(child_task.text_size, parent_task.text_size);

        // Find the corresponding memory map in child that matches our test allocation
        let child_memmaps = child_task.vm_manager.get_memmap();
        let child_mmap = child_memmaps.iter()
            .find(|mmap| mmap.vmarea.start == vaddr && mmap.vmarea.end == vaddr + num_pages * crate::environment::PAGE_SIZE - 1)
            .expect("Test memory map not found in child task");

        // Verify that our specific memory region exists in both parent and child
        let parent_memmaps = parent_task.vm_manager.get_memmap();
        let parent_test_mmap = parent_memmaps.iter()
            .find(|mmap| mmap.vmarea.start == vaddr && mmap.vmarea.end == vaddr + num_pages * crate::environment::PAGE_SIZE - 1)
            .expect("Test memory map not found in parent task");

        // Verify the virtual memory ranges match
        assert_eq!(child_mmap.vmarea.start, parent_test_mmap.vmarea.start);
        assert_eq!(child_mmap.vmarea.end, parent_test_mmap.vmarea.end);
        assert_eq!(child_mmap.permissions, parent_test_mmap.permissions);

        // Verify the data was copied correctly
        // SAFETY: both pointers address live page allocations owned by the
        // parent and child tasks respectively, and reads stay within them.
        unsafe {
            let parent_ptr = mmap.pmarea.start as *const u8;
            let child_ptr = child_mmap.pmarea.start as *const u8;
            
            // Check that physical addresses are different (separate memory)
            assert_ne!(parent_ptr, child_ptr, "Parent and child should have different physical memory");
            
            // Check that the data content is identical
            for i in 0..test_data.len() {
                let parent_byte = *parent_ptr.offset(i as isize);
                let child_byte = *child_ptr.offset(i as isize);
                assert_eq!(parent_byte, child_byte, "Data mismatch at offset {}", i);
            }
        }

        // Verify that modifying parent's memory doesn't affect child's memory
        unsafe {
            let parent_ptr = mmap.pmarea.start as *mut u8;
            let original_value = *parent_ptr;
            *parent_ptr = 0xFF; // Modify first byte in parent
            
            let child_ptr = child_mmap.pmarea.start as *const u8;
            let child_first_byte = *child_ptr;
            
            // Child's first byte should still be the original value
            assert_eq!(child_first_byte, original_value, "Child memory should be independent from parent");
        }

        // Verify register states were copied
        assert_eq!(child_task.vcpu.get_pc(), parent_task.vcpu.get_pc());
        
        // Verify entry point was copied
        assert_eq!(child_task.entry, parent_task.entry);

        // Verify state was copied
        assert_eq!(child_task.state, parent_task.state);

        // Verify that both tasks have the correct number of managed pages
        assert!(child_task.managed_pages.len() >= num_pages, 
            "Child should have at least the test pages in managed pages");
    }
1280
    #[test_case]
    fn test_clone_task_stack_copy() {
        // Verifies that clone_task() copies the parent's stack contents into
        // fresh child-owned pages rather than sharing them.
        let mut parent_task = super::new_user_task("ParentWithStack".to_string(), 0);
        parent_task.init();

        // Find the stack memory map in parent
        let stack_mmap = parent_task.vm_manager.get_memmap().iter()
            .find(|mmap| {
                // Stack should be near USER_STACK_TOP and have stack permissions
                use crate::vm::vmem::VirtualMemoryRegion;
                mmap.vmarea.end == crate::environment::USER_STACK_TOP - 1 && 
                mmap.permissions == VirtualMemoryRegion::Stack.default_permissions()
            })
            .expect("Stack memory map not found in parent task")
            .clone();

        // Write test data to parent's stack
        // NOTE(review): the write lands at pmarea.start + PAGE_SIZE, i.e. one
        // page into the stack's physical backing — presumably to avoid the
        // lowest page; confirm against init()'s stack layout.
        let stack_test_data: [u8; 16] = [
            0xAA, 0xBB, 0xCC, 0xDD, 0xEE, 0xFF, 0x11, 0x22,
            0x33, 0x44, 0x55, 0x66, 0x77, 0x88, 0x99, 0x00
        ];
        unsafe {
            let stack_ptr = (stack_mmap.pmarea.start + crate::environment::PAGE_SIZE) as *mut u8;
            core::ptr::copy_nonoverlapping(stack_test_data.as_ptr(), stack_ptr, stack_test_data.len());
        }

        // Clone the parent task
        let child_task = parent_task.clone_task(CloneFlags::default()).unwrap();

        // Find the corresponding stack memory map in child
        let child_stack_mmap = child_task.vm_manager.get_memmap().iter()
            .find(|mmap| {
                use crate::vm::vmem::VirtualMemoryRegion;
                mmap.vmarea.start == stack_mmap.vmarea.start &&
                mmap.vmarea.end == stack_mmap.vmarea.end &&
                mmap.permissions == VirtualMemoryRegion::Stack.default_permissions()
            })
            .expect("Stack memory map not found in child task");

        // Verify that stack content was copied correctly
        // SAFETY: both pointers target live stack backing pages of the two
        // tasks; reads stay within the 16 test bytes written above.
        unsafe {
            let parent_stack_ptr = (stack_mmap.pmarea.start + crate::environment::PAGE_SIZE) as *const u8;
            let child_stack_ptr = (child_stack_mmap.pmarea.start + crate::environment::PAGE_SIZE) as *const u8;

            // Check that physical addresses are different (separate memory)
            assert_ne!(parent_stack_ptr, child_stack_ptr, 
                "Parent and child should have different stack physical memory");

            // Check that the stack data content is identical
            for i in 0..stack_test_data.len() {
                let parent_byte = *parent_stack_ptr.offset(i as isize);
                let child_byte = *child_stack_ptr.offset(i as isize);
                assert_eq!(parent_byte, child_byte, 
                    "Stack data mismatch at offset {}: parent={:#x}, child={:#x}", 
                    i, parent_byte, child_byte);
            }
        }

        // Verify that modifying parent's stack doesn't affect child's stack
        unsafe {
            let parent_stack_ptr = (stack_mmap.pmarea.start + crate::environment::PAGE_SIZE) as *mut u8;
            let original_value = *parent_stack_ptr;
            *parent_stack_ptr = 0xFE; // Modify first byte in parent stack

            let child_stack_ptr = (child_stack_mmap.pmarea.start + crate::environment::PAGE_SIZE) as *const u8;
            let child_first_byte = *child_stack_ptr;

            // Child's first byte should still be the original value
            assert_eq!(child_first_byte, original_value, 
                "Child stack should be independent from parent stack");
        }

        // Verify stack sizes match
        assert_eq!(child_task.stack_size, parent_task.stack_size,
            "Child and parent should have the same stack size");
    }
1357
    #[test_case]
    fn test_clone_task_shared_memory() {
        // Verifies that clone_task() shares (not copies) mappings flagged
        // is_shared: parent and child reference the same physical pages, so
        // a write through one is visible through the other.
        use crate::vm::vmem::{VirtualMemoryMap, MemoryArea, VirtualMemoryPermission};
        use crate::mem::page::allocate_raw_pages;
        use crate::environment::PAGE_SIZE;
        
        let mut parent_task = super::new_user_task("ParentWithShared".to_string(), 0);
        parent_task.init();

        // Manually add a shared memory region to test sharing behavior
        let shared_vaddr = 0x5000;
        let num_pages = 1;
        let pages = allocate_raw_pages(num_pages);
        let paddr = pages as usize;
        
        let shared_mmap = VirtualMemoryMap {
            pmarea: MemoryArea {
                start: paddr,
                end: paddr + PAGE_SIZE - 1,
            },
            vmarea: MemoryArea {
                start: shared_vaddr,
                end: shared_vaddr + PAGE_SIZE - 1,
            },
            permissions: VirtualMemoryPermission::Read as usize | VirtualMemoryPermission::Write as usize,
            is_shared: true, // This should be shared between parent and child
        };
        
        // Add shared memory map to parent
        parent_task.vm_manager.add_memory_map(shared_mmap).unwrap();
        
        // Write test data to shared memory
        // SAFETY: paddr is the page just allocated above; the 8-byte write
        // fits within the single page.
        let test_data: [u8; 8] = [0xAA, 0xBB, 0xCC, 0xDD, 0xEE, 0xFF, 0x11, 0x22];
        unsafe {
            let shared_ptr = paddr as *mut u8;
            core::ptr::copy_nonoverlapping(test_data.as_ptr(), shared_ptr, test_data.len());
        }

        // Clone the parent task
        let child_task = parent_task.clone_task(CloneFlags::default()).unwrap();

        // Find the shared memory map in child
        let child_shared_mmap = child_task.vm_manager.get_memmap().iter()
            .find(|mmap| mmap.vmarea.start == shared_vaddr && mmap.is_shared)
            .expect("Shared memory map not found in child task");

        // Verify that the physical addresses are the same (shared memory)
        assert_eq!(child_shared_mmap.pmarea.start, shared_mmap.pmarea.start,
            "Shared memory should have the same physical address in parent and child");
        
        // Verify that the virtual addresses are the same
        assert_eq!(child_shared_mmap.vmarea.start, shared_mmap.vmarea.start);
        assert_eq!(child_shared_mmap.vmarea.end, shared_mmap.vmarea.end);
        
        // Verify that is_shared flag is preserved
        assert!(child_shared_mmap.is_shared, "Shared memory should remain marked as shared");

        // Verify that modifying shared memory from child affects parent
        unsafe {
            let child_shared_ptr = child_shared_mmap.pmarea.start as *mut u8;
            let original_value = *child_shared_ptr;
            *child_shared_ptr = 0xFF; // Modify first byte through child reference
            
            let parent_shared_ptr = shared_mmap.pmarea.start as *const u8;
            let parent_first_byte = *parent_shared_ptr;
            
            // Parent should see the change made by child (shared memory)
            assert_eq!(parent_first_byte, 0xFF, 
                "Parent should see changes made through child's shared memory reference");
                
            // Restore original value
            *child_shared_ptr = original_value;
        }
        
        // Verify that the shared data content is accessible from both
        unsafe {
            let child_ptr = child_shared_mmap.pmarea.start as *const u8;
            let parent_ptr = shared_mmap.pmarea.start as *const u8;
            
            // Check that the data content is identical and accessible from both
            for i in 0..test_data.len() {
                let parent_byte = *parent_ptr.offset(i as isize);
                let child_byte = *child_ptr.offset(i as isize);
                assert_eq!(parent_byte, child_byte, 
                    "Shared memory data should be identical from both parent and child views");
            }
        }
    }
1446}