kernel/task/
mod.rs

//! Task module.
//!
//! The task module defines the structure and behavior of tasks in the system.

pub mod syscall;
pub mod elf_loader;

extern crate alloc;

use alloc::{boxed::Box, string::{String, ToString}, sync::Arc, vec::Vec};
use spin::Mutex;

use crate::{
    arch::{get_cpu, vcpu::Vcpu},
    environment::{
        DEAFAULT_MAX_TASK_DATA_SIZE, DEAFAULT_MAX_TASK_STACK_SIZE, DEAFAULT_MAX_TASK_TEXT_SIZE,
        KERNEL_VM_STACK_END, PAGE_SIZE,
    },
    fs::{File, VfsManager},
    library::std::print,
    mem::page::{allocate_raw_pages, free_boxed_page, Page},
    println,
    sched::scheduler::get_scheduler,
    vm::{
        manager::VirtualMemoryManager, user_kernel_vm_init, user_vm_init,
        vmem::{MemoryArea, VirtualMemoryMap, VirtualMemoryRegion},
    },
};
use crate::abi::{scarlet::ScarletAbi, AbiModule};

/// The maximum number of file descriptors a task can have.
/// This value is set to 256 as a reasonable default for most use cases,
/// balancing resource usage and typical application needs. Adjust if necessary.
const NUM_OF_FDS: usize = 256;

#[derive(Debug, PartialEq, Clone, Copy)]
pub enum TaskState {
    NotInitialized,
    Ready,
    Running,
    Blocked,
    Zombie,
    Terminated,
}

#[derive(Debug, PartialEq, Clone, Copy)]
pub enum TaskType {
    Kernel,
    User,
}

pub struct Task {
    id: usize,
    pub name: String,
    pub priority: u32,
    pub vcpu: Vcpu,
    pub state: TaskState,
    pub task_type: TaskType,
    pub entry: usize,
    pub brk: Option<usize>,        /* Program break (not used in kernel tasks) */
    pub stack_size: usize,         /* Size of the stack in bytes */
    pub data_size: usize,          /* Size of the data segment in bytes, page-aligned (not used in kernel tasks) */
    pub text_size: usize,          /* Size of the text segment in bytes (not used in kernel tasks) */
    pub max_stack_size: usize,     /* Maximum size of the stack in bytes */
    pub max_data_size: usize,      /* Maximum size of the data segment in bytes */
    pub max_text_size: usize,      /* Maximum size of the text segment in bytes */
    pub vm_manager: VirtualMemoryManager,
    /// Managed pages
    ///
    /// Managed pages are freed automatically when the task is terminated.
    pub managed_pages: Vec<ManagedPage>,
    parent_id: Option<usize>,      /* Parent task ID */
    children: Vec<usize>,          /* List of child task IDs */
    exit_status: Option<i32>,      /* Exit status (for monitoring child task termination) */

    /// Dynamic ABI
    pub abi: Option<Box<dyn AbiModule>>,

    // File descriptor table: a stack of free descriptor numbers
    fd_table: Vec<usize>,
    // Open files, indexed by file descriptor
    files: [Option<File>; NUM_OF_FDS],
    // Current working directory
    pub cwd: Option<String>,

    /// Virtual File System Manager
    ///
    /// Each task can have its own isolated VfsManager instance for containerization
    /// and namespace isolation. The VfsManager provides:
    ///
    /// - **Filesystem Isolation**: Independent mount point namespaces allowing
    ///   complete filesystem isolation between tasks or containers
    /// - **Selective Sharing**: Arc-based filesystem object sharing enables
    ///   controlled resource sharing while maintaining namespace independence
    /// - **Bind Mount Support**: Advanced bind mount capabilities for flexible
    ///   directory mapping and container orchestration scenarios
    /// - **Security**: Path normalization and validation preventing directory
    ///   traversal attacks and unauthorized filesystem access
    ///
    /// # Usage Patterns
    ///
    /// - `None`: Task uses global filesystem namespace (traditional Unix-like behavior)
    /// - `Some(Arc<VfsManager>)`: Task has isolated filesystem namespace (container-like behavior)
    ///
    /// # Thread Safety
    ///
    /// VfsManager is thread-safe and can be shared between tasks using Arc.
    /// All internal operations use RwLock for concurrent access protection.
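    ///
    /// # Example
    ///
    /// A minimal sketch of sharing a namespace between two tasks. The
    /// `VfsManager` constructor shown here is an assumption for illustration;
    /// see `fs::VfsManager` for the actual API:
    ///
    /// ```ignore
    /// // Hypothetical constructor, shown only to illustrate the Arc sharing.
    /// let vfs = Arc::new(VfsManager::new());
    /// parent.vfs = Some(vfs.clone()); // parent joins the namespace
    /// child.vfs = Some(vfs);          // child shares the same namespace
    /// ```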
    pub vfs: Option<Arc<VfsManager>>,
}

#[derive(Debug, Clone)]
pub struct ManagedPage {
    pub vaddr: usize,
    pub page: Box<Page>,
}

static TASK_ID: Mutex<usize> = Mutex::new(1);

impl Task {
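    /// Create a new task.
    ///
    /// The task starts in the `NotInitialized` state; call [`Task::init`]
    /// before scheduling it.
    ///
    /// # Example
    ///
    /// A minimal sketch:
    ///
    /// ```ignore
    /// let mut task = Task::new("demo".to_string(), 0, TaskType::User);
    /// assert_eq!(task.get_state(), TaskState::NotInitialized);
    /// task.init();
    /// assert_eq!(task.get_state(), TaskState::Ready);
    /// ```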
    pub fn new(name: String, priority: u32, task_type: TaskType) -> Self {
        let mut taskid = TASK_ID.lock();
        let mut task = Task {
            id: *taskid,
            name,
            priority,
            vcpu: Vcpu::new(match task_type {
                TaskType::Kernel => crate::arch::vcpu::Mode::Kernel,
                TaskType::User => crate::arch::vcpu::Mode::User,
            }),
            state: TaskState::NotInitialized,
            task_type,
            entry: 0,
            brk: None,
            stack_size: 0,
            data_size: 0,
            text_size: 0,
            max_stack_size: DEAFAULT_MAX_TASK_STACK_SIZE,
            max_data_size: DEAFAULT_MAX_TASK_DATA_SIZE,
            max_text_size: DEAFAULT_MAX_TASK_TEXT_SIZE,
            vm_manager: VirtualMemoryManager::new(),
            managed_pages: Vec::new(),
            parent_id: None,
            children: Vec::new(),
            exit_status: None,
            abi: Some(Box::new(ScarletAbi::default())), // Default ABI
            fd_table: Vec::new(),
            files: [const { None }; NUM_OF_FDS],
            cwd: None,
            vfs: None,
        };

        /* Fill the free-descriptor stack in reverse so that fd 0 is popped first */
        for i in (0..NUM_OF_FDS).rev() {
            task.fd_table.push(i);
        }

        *taskid += 1;
        task
    }

    pub fn init(&mut self) {
        match self.task_type {
            TaskType::Kernel => {
                user_kernel_vm_init(self);
                /* Set sp to the top of the kernel stack */
                self.vcpu.set_sp(KERNEL_VM_STACK_END + 1);
            },
            TaskType::User => {
                user_vm_init(self);
                /* Set sp to the top of the user stack */
                self.vcpu.set_sp(0xffff_ffff_ffff_f000);
            }
        }

        /* Set the task state to Ready */
        self.state = TaskState::Ready;
    }

    pub fn get_id(&self) -> usize {
        self.id
    }

    /// Set the task state
    ///
    /// # Arguments
    /// * `state` - The new task state
    ///
    pub fn set_state(&mut self, state: TaskState) {
        self.state = state;
    }

    /// Get the task state
    ///
    /// # Returns
    /// The task state
    ///
    pub fn get_state(&self) -> TaskState {
        self.state
    }

    /// Get the size of the task.
    ///
    /// # Returns
    /// The size of the task in bytes.
    pub fn get_size(&self) -> usize {
        self.stack_size + self.text_size + self.data_size
    }

    /// Get the program break (not used in kernel tasks)
    ///
    /// # Returns
    /// The program break address. If it has not been set yet, it defaults
    /// to the end of the text and data segments.
    pub fn get_brk(&self) -> usize {
        self.brk.unwrap_or(self.text_size + self.data_size)
    }

    /// Set the program break (not used in kernel tasks)
    ///
    /// Grows or shrinks the data segment page by page to cover the new break.
    ///
    /// # Arguments
    /// * `brk` - The new program break address
    ///
    /// # Returns
    /// If successful, returns Ok(()), otherwise returns an error.
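    ///
    /// # Example
    ///
    /// A minimal sketch, mirroring the unit test at the bottom of this
    /// module (addresses are illustrative):
    ///
    /// ```ignore
    /// task.set_brk(0x2000)?;              // grow: allocates data pages
    /// assert_eq!(task.get_brk(), 0x2000);
    /// task.set_brk(0x1000)?;              // shrink: frees whole pages
    /// ```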
    pub fn set_brk(&mut self, brk: usize) -> Result<(), &'static str> {
        if brk < self.text_size {
            return Err("Invalid address");
        }
        let prev_brk = self.get_brk();
        if brk < prev_brk {
            /* Free pages */
            /* Round addresses up to the page boundary */
            let prev_addr = (prev_brk + PAGE_SIZE - 1) & !(PAGE_SIZE - 1);
            let addr = (brk + PAGE_SIZE - 1) & !(PAGE_SIZE - 1);
            let num_of_pages = (prev_addr - addr) / PAGE_SIZE;
            self.free_data_pages(addr, num_of_pages);
        } else if brk > prev_brk {
            /* Allocate pages */
            /* Round addresses up to the page boundary */
            let prev_addr = (prev_brk + PAGE_SIZE - 1) & !(PAGE_SIZE - 1);
            let addr = (brk + PAGE_SIZE - 1) & !(PAGE_SIZE - 1);
            let num_of_pages = (addr - prev_addr) / PAGE_SIZE;

            if num_of_pages > 0 {
                match self.vm_manager.search_memory_map(prev_addr) {
                    Some(_) => {},
                    None => {
                        if self.allocate_data_pages(prev_addr, num_of_pages).is_err() {
                            return Err("Failed to allocate pages");
                        }
                    },
                }
            }
        }
        self.brk = Some(brk);
        Ok(())
    }

    /// Allocate pages for the task.
    ///
    /// # Arguments
    /// * `vaddr` - The virtual address to allocate pages at (NOTE: the address must be page aligned)
    /// * `num_of_pages` - The number of pages to allocate
    /// * `permissions` - The permission flags for the mapping
    ///
    /// # Returns
    /// The memory map of the allocated pages, if successful.
    ///
    /// # Errors
    /// If the address is not page aligned, or if the pages cannot be allocated.
    ///
    /// # Note
    /// This function does not increment the size of the task.
    /// You must increment the size of the task manually.
    ///
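    /// # Example
    ///
    /// A minimal sketch (the caller updates the segment size, as noted above):
    ///
    /// ```ignore
    /// let perms = VirtualMemoryRegion::Data.default_permissions();
    /// let mmap = task.allocate_pages(0x4000, 2, perms)?;
    /// task.data_size += 2 * PAGE_SIZE; // not done by allocate_pages
    /// ```
    ///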
    pub fn allocate_pages(&mut self, vaddr: usize, num_of_pages: usize, permissions: usize) -> Result<VirtualMemoryMap, &'static str> {
        if vaddr % PAGE_SIZE != 0 {
            return Err("Address is not page aligned");
        }

        let pages = allocate_raw_pages(num_of_pages);
        let size = num_of_pages * PAGE_SIZE;
        let paddr = pages as usize;
        let mmap = VirtualMemoryMap {
            pmarea: MemoryArea {
                start: paddr,
                end: paddr + size - 1,
            },
            vmarea: MemoryArea {
                start: vaddr,
                end: vaddr + size - 1,
            },
            permissions,
            is_shared: false, // Default to not shared for task-allocated pages
        };
        self.vm_manager.add_memory_map(mmap).map_err(|e| panic!("Failed to add memory map: {}", e))?;

        /* Hand ownership of each raw page to the task so it is freed on termination */
        for i in 0..num_of_pages {
            let page = unsafe { Box::from_raw(pages.wrapping_add(i)) };
            let vaddr = mmap.vmarea.start + i * PAGE_SIZE;
            self.add_managed_page(ManagedPage {
                vaddr,
                page,
            });
        }

        Ok(mmap)
    }

    /// Free pages for the task.
    ///
    /// # Arguments
    /// * `vaddr` - The virtual address to free pages at (NOTE: the address must be page aligned)
    /// * `num_of_pages` - The number of pages to free
    pub fn free_pages(&mut self, vaddr: usize, num_of_pages: usize) {
        let page = vaddr / PAGE_SIZE;
        for p in 0..num_of_pages {
            let vaddr = (page + p) * PAGE_SIZE;
            if let Some(idx) = self.vm_manager.search_memory_map_idx(vaddr) {
                let mmap = self.vm_manager.remove_memory_map(idx).unwrap();
                if p == 0 && mmap.vmarea.start < vaddr {
                    /* Re-add the first part of the memory map */
                    let size = vaddr - mmap.vmarea.start;
                    let paddr = mmap.pmarea.start;
                    let mmap1 = VirtualMemoryMap {
                        pmarea: MemoryArea {
                            start: paddr,
                            end: paddr + size - 1,
                        },
                        vmarea: MemoryArea {
                            start: mmap.vmarea.start,
                            end: vaddr - 1,
                        },
                        permissions: mmap.permissions,
                        is_shared: mmap.is_shared,
                    };
                    self.vm_manager.add_memory_map(mmap1)
                        .map_err(|e| panic!("Failed to add memory map: {}", e)).unwrap();
                }
                if p == num_of_pages - 1 && mmap.vmarea.end > vaddr + PAGE_SIZE - 1 {
                    /* Re-add the second part of the memory map */
                    let size = mmap.vmarea.end - (vaddr + PAGE_SIZE) + 1;
                    let paddr = mmap.pmarea.start + (vaddr + PAGE_SIZE - mmap.vmarea.start);
                    let mmap2 = VirtualMemoryMap {
                        pmarea: MemoryArea {
                            start: paddr,
                            end: paddr + size - 1,
                        },
                        vmarea: MemoryArea {
                            start: vaddr + PAGE_SIZE,
                            end: mmap.vmarea.end,
                        },
                        permissions: mmap.permissions,
                        is_shared: mmap.is_shared,
                    };
                    self.vm_manager.add_memory_map(mmap2)
                        .map_err(|e| panic!("Failed to add memory map: {}", e)).unwrap();
                }

                /* Return the page to the allocator only if this task owns it */
                if let Some(free_page) = self.remove_managed_page(vaddr) {
                    free_boxed_page(free_page);
                }
            }
        }
        /* Unmap pages */
        let root_pagetable = self.vm_manager.get_root_page_table().unwrap();
        for p in 0..num_of_pages {
            let vaddr = (page + p) * PAGE_SIZE;
            root_pagetable.unmap(vaddr);
        }
    }

    /// Allocate text pages for the task and increment the task's text size.
    ///
    /// # Arguments
    /// * `vaddr` - The virtual address to allocate pages at (NOTE: the address must be page aligned)
    /// * `num_of_pages` - The number of pages to allocate
    ///
    /// # Returns
    /// The memory map of the allocated pages, if successful.
    ///
    /// # Errors
    /// If the address is not page aligned, or if the pages cannot be allocated.
    ///
    pub fn allocate_text_pages(&mut self, vaddr: usize, num_of_pages: usize) -> Result<VirtualMemoryMap, &'static str> {
        let permissions = VirtualMemoryRegion::Text.default_permissions();
        let res = self.allocate_pages(vaddr, num_of_pages, permissions)?;
        self.text_size += num_of_pages * PAGE_SIZE;
        Ok(res)
    }

    /// Free text pages for the task and decrement the task's text size.
    ///
    /// # Arguments
    /// * `vaddr` - The virtual address to free pages at (NOTE: the address must be page aligned)
    /// * `num_of_pages` - The number of pages to free
    ///
    pub fn free_text_pages(&mut self, vaddr: usize, num_of_pages: usize) {
        self.free_pages(vaddr, num_of_pages);
        self.text_size -= num_of_pages * PAGE_SIZE;
    }

    /// Allocate stack pages for the task and increment the task's stack size.
    ///
    /// # Arguments
    /// * `vaddr` - The virtual address to allocate pages at (NOTE: the address must be page aligned)
    /// * `num_of_pages` - The number of pages to allocate
    ///
    /// # Returns
    /// The memory map of the allocated pages, if successful.
    ///
    /// # Errors
    /// If the address is not page aligned, or if the pages cannot be allocated.
    ///
    pub fn allocate_stack_pages(&mut self, vaddr: usize, num_of_pages: usize) -> Result<VirtualMemoryMap, &'static str> {
        let permissions = VirtualMemoryRegion::Stack.default_permissions();
        let res = self.allocate_pages(vaddr, num_of_pages, permissions)?;
        self.stack_size += num_of_pages * PAGE_SIZE;
        Ok(res)
    }

    /// Free stack pages for the task and decrement the task's stack size.
    ///
    /// # Arguments
    /// * `vaddr` - The virtual address to free pages at (NOTE: the address must be page aligned)
    /// * `num_of_pages` - The number of pages to free
    ///
    pub fn free_stack_pages(&mut self, vaddr: usize, num_of_pages: usize) {
        self.free_pages(vaddr, num_of_pages);
        self.stack_size -= num_of_pages * PAGE_SIZE;
    }

    /// Allocate data pages for the task and increment the task's data size.
    ///
    /// # Arguments
    /// * `vaddr` - The virtual address to allocate pages at (NOTE: the address must be page aligned)
    /// * `num_of_pages` - The number of pages to allocate
    ///
    /// # Returns
    /// The memory map of the allocated pages, if successful.
    ///
    /// # Errors
    /// If the address is not page aligned, or if the pages cannot be allocated.
    ///
    pub fn allocate_data_pages(&mut self, vaddr: usize, num_of_pages: usize) -> Result<VirtualMemoryMap, &'static str> {
        let permissions = VirtualMemoryRegion::Data.default_permissions();
        let res = self.allocate_pages(vaddr, num_of_pages, permissions)?;
        self.data_size += num_of_pages * PAGE_SIZE;
        Ok(res)
    }

    /// Free data pages for the task and decrement the task's data size.
    ///
    /// # Arguments
    /// * `vaddr` - The virtual address to free pages at (NOTE: the address must be page aligned)
    /// * `num_of_pages` - The number of pages to free
    ///
    pub fn free_data_pages(&mut self, vaddr: usize, num_of_pages: usize) {
        self.free_pages(vaddr, num_of_pages);
        self.data_size -= num_of_pages * PAGE_SIZE;
    }

    /// Allocate guard pages for the task.
    ///
    /// # Arguments
    /// * `vaddr` - The virtual address to allocate pages at (NOTE: the address must be page aligned)
    /// * `num_of_pages` - The number of pages to allocate
    ///
    /// # Returns
    /// The memory map of the allocated pages, if successful.
    ///
    /// # Errors
    /// If the address is not page aligned, or if the pages cannot be allocated.
    ///
    /// # Note
    /// Guard pages are not backed by physical memory. This function only
    /// constructs the virtual memory map; the caller must register it with
    /// the VM manager.
    ///
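    /// # Example
    ///
    /// A sketch of placing a guard page below a stack region (`stack_base`
    /// is an illustrative address):
    ///
    /// ```ignore
    /// let guard = task.allocate_guard_pages(stack_base - PAGE_SIZE, 1)?;
    /// task.vm_manager.add_memory_map(guard)?;
    /// ```
    ///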
    pub fn allocate_guard_pages(&mut self, vaddr: usize, num_of_pages: usize) -> Result<VirtualMemoryMap, &'static str> {
        let permissions = VirtualMemoryRegion::Guard.default_permissions();
        let mmap = VirtualMemoryMap {
            pmarea: MemoryArea {
                start: 0,
                end: 0,
            },
            vmarea: MemoryArea {
                start: vaddr,
                end: vaddr + num_of_pages * PAGE_SIZE - 1,
            },
            permissions,
            is_shared: VirtualMemoryRegion::Guard.is_shareable(), // Guard pages can be shared
        };
        Ok(mmap)
    }

    /// Add a page to the task's managed pages
    ///
    /// # Arguments
    /// * `page` - The managed page to add
    ///
    /// # Note
    /// Pages added as ManagedPage of the Task will be automatically freed when the Task is terminated.
    /// So, you must not free them manually by calling free_raw_pages/free_boxed_page.
    ///
    pub fn add_managed_page(&mut self, page: ManagedPage) {
        self.managed_pages.push(page);
    }

    /// Get a managed page
    ///
    /// # Arguments
    /// * `vaddr` - The virtual address of the page
    ///
    /// # Returns
    /// The managed page if found, otherwise None
    ///
    fn get_managed_page(&self, vaddr: usize) -> Option<&ManagedPage> {
        self.managed_pages.iter().find(|page| page.vaddr == vaddr)
    }

    /// Remove a managed page
    ///
    /// # Arguments
    /// * `vaddr` - The virtual address of the page
    ///
    /// # Returns
    /// The removed managed page if found, otherwise None
    ///
    fn remove_managed_page(&mut self, vaddr: usize) -> Option<Box<Page>> {
        let idx = self.managed_pages.iter().position(|page| page.vaddr == vaddr)?;
        Some(self.managed_pages.remove(idx).page)
    }

    /// Set the entry point of the task by setting the vCPU's program counter.
    pub fn set_entry_point(&mut self, entry: usize) {
        self.vcpu.set_pc(entry as u64);
    }

    /// Get the parent ID
    ///
    /// # Returns
    /// The parent task ID, or None if there is no parent
    pub fn get_parent_id(&self) -> Option<usize> {
        self.parent_id
    }

    /// Set the parent task
    ///
    /// # Arguments
    /// * `parent_id` - The ID of the parent task
    pub fn set_parent_id(&mut self, parent_id: usize) {
        self.parent_id = Some(parent_id);
    }

    /// Add a child task
    ///
    /// # Arguments
    /// * `child_id` - The ID of the child task
    pub fn add_child(&mut self, child_id: usize) {
        if !self.children.contains(&child_id) {
            self.children.push(child_id);
        }
    }

    /// Remove a child task
    ///
    /// # Arguments
    /// * `child_id` - The ID of the child task to remove
    ///
    /// # Returns
    /// true if the removal was successful, false if the child task was not found
    pub fn remove_child(&mut self, child_id: usize) -> bool {
        if let Some(pos) = self.children.iter().position(|&id| id == child_id) {
            self.children.remove(pos);
            true
        } else {
            false
        }
    }

    /// Get the list of child tasks
    ///
    /// # Returns
    /// A vector of child task IDs
    pub fn get_children(&self) -> &Vec<usize> {
        &self.children
    }

    /// Set the exit status
    ///
    /// # Arguments
    /// * `status` - The exit status
    pub fn set_exit_status(&mut self, status: i32) {
        self.exit_status = Some(status);
    }

    /// Get the exit status
    ///
    /// # Returns
    /// The exit status, or None if not set
    pub fn get_exit_status(&self) -> Option<i32> {
        self.exit_status
    }

    /// Get the file descriptor table
    ///
    /// # Returns
    /// A reference to the file descriptor table
    ///
    pub fn get_fd_table(&self) -> &Vec<usize> {
        &self.fd_table
    }

    /// Get the file at the specified file descriptor
    ///
    /// # Arguments
    /// * `fd` - The file descriptor of the file
    ///
    /// # Returns
    /// The file at the specified file descriptor, or None if not found
    ///
    pub fn get_file(&self, fd: usize) -> Option<&File> {
        if fd < NUM_OF_FDS {
            self.files[fd].as_ref()
        } else {
            None
        }
    }

    /// Get the mutable file at the specified file descriptor
    ///
    /// # Arguments
    /// * `fd` - The file descriptor of the file
    ///
    /// # Returns
    /// The mutable file at the specified file descriptor, or None if not found
    ///
    pub fn get_mut_file(&mut self, fd: usize) -> Option<&mut File> {
        if fd < NUM_OF_FDS {
            self.files[fd].as_mut()
        } else {
            None
        }
    }

    /// Set the file at the specified file descriptor
    ///
    /// # Arguments
    /// * `fd` - The file descriptor of the file
    /// * `file` - The file to set
    ///
    /// # Returns
    /// The result of setting the file, which is Ok(()) if successful or an error message if not.
    ///
    pub fn set_file(&mut self, fd: usize, file: File) -> Result<(), &'static str> {
        if fd < NUM_OF_FDS {
            self.files[fd] = Some(file);
            Ok(())
        } else {
            Err("File descriptor out of bounds")
        }
    }

    /// Add a file to the task
    ///
    /// # Arguments
    /// * `file` - The file handle to add
    ///
    /// # Returns
    /// The file descriptor of the file, or an error message if the file descriptor table is full
    ///
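    /// # Example
    ///
    /// A minimal sketch of the descriptor lifecycle (assuming `file` is an
    /// already-open `File`):
    ///
    /// ```ignore
    /// let fd = task.add_file(file)?; // pops a free descriptor
    /// let f = task.get_file(fd);     // look the file up
    /// task.remove_file(fd)?;         // returns the descriptor to the free list
    /// ```
    ///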
    pub fn add_file(&mut self, file: File) -> Result<usize, &'static str> {
        if let Some(fd) = self.allocate_fd() {
            self.files[fd] = Some(file);
            Ok(fd)
        } else {
            Err("File descriptor table is full")
        }
    }

    /// Remove a file from the task
    ///
    /// # Arguments
    /// * `fd` - The file descriptor to remove
    ///
    /// # Returns
    /// The result of removing the file, which is Ok(()) if successful or an error message if not.
    ///
    pub fn remove_file(&mut self, fd: usize) -> Result<(), &'static str> {
        if fd < NUM_OF_FDS {
            if self.files[fd].is_none() {
                return Err("File descriptor is already empty");
            }
            self.files[fd] = None;
            self.fd_table.push(fd);
            Ok(())
        } else {
            Err("File descriptor out of bounds")
        }
    }

    /// Allocate a file descriptor
    ///
    /// # Returns
    /// The allocated file descriptor, or None if no file descriptors are available
    ///
    fn allocate_fd(&mut self) -> Option<usize> {
        self.fd_table.pop()
    }

    /// Clone this task, creating a near-identical copy
    ///
    /// Shared memory regions keep referencing the parent's physical pages,
    /// while private regions are deep-copied into newly allocated pages.
    ///
    /// # Returns
    /// The cloned task
    ///
    /// # Errors
    /// If the task cannot be cloned, an error is returned.
    ///
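    /// # Example
    ///
    /// A minimal fork-like sketch (registering the child with the scheduler
    /// is assumed to happen elsewhere):
    ///
    /// ```ignore
    /// let child = parent.clone_task()?;
    /// assert_eq!(child.get_parent_id(), Some(parent.get_id()));
    /// ```
    ///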
    pub fn clone_task(&mut self) -> Result<Task, &'static str> {
        // Create a new task (but don't call init() yet)
        let mut child = Task::new(
            self.name.clone(),
            self.priority,
            self.task_type
        );

        // First, set up the virtual memory manager with the same ASID allocation
        match self.task_type {
            TaskType::Kernel => {
                // For kernel tasks, we need to call init to set up the kernel VM
                child.init();
            },
            TaskType::User => {
                // For user tasks, manually set up VM without calling init()
                // to avoid creating a new stack that would overwrite the parent's stack content
                use crate::arch::vm::alloc_virtual_address_space;
                let asid = alloc_virtual_address_space();
                child.vm_manager.set_asid(asid);
                child.state = TaskState::Ready;
            }
        }

        // Copy or share memory maps from parent to child
        for mmap in self.vm_manager.get_memmap() {
            let num_pages = (mmap.vmarea.end - mmap.vmarea.start + 1 + PAGE_SIZE - 1) / PAGE_SIZE;
            let vaddr = mmap.vmarea.start;

            if num_pages > 0 {
                if mmap.is_shared {
                    // Shared memory regions: just reference the same physical pages
                    let shared_mmap = VirtualMemoryMap {
                        pmarea: mmap.pmarea, // Same physical memory
                        vmarea: mmap.vmarea, // Same virtual addresses
                        permissions: mmap.permissions,
                        is_shared: true,
                    };
                    // Add the shared memory map directly to the child task
                    child.vm_manager.add_memory_map(shared_mmap)
                        .map_err(|_| "Failed to add shared memory map to child task")?;

                    // TODO: Add logic to determine if the memory map is a trampoline
                    // If the memory map is the trampoline, pre-map it
                    if mmap.vmarea.start == 0xffff_ffff_ffff_f000 {
                        // Pre-map the trampoline page
                        let root_pagetable = child.vm_manager.get_root_page_table().unwrap();
                        root_pagetable.map_memory_area(shared_mmap)?;
                    }
                } else {
                    // Private memory regions: allocate new pages and copy contents
                    let permissions = mmap.permissions;
                    let pages = allocate_raw_pages(num_pages);
                    let size = num_pages * PAGE_SIZE;
                    let paddr = pages as usize;
                    let new_mmap = VirtualMemoryMap {
                        pmarea: MemoryArea {
                            start: paddr,
                            end: paddr + (size - 1),
                        },
                        vmarea: MemoryArea {
                            start: vaddr,
                            end: vaddr + (size - 1),
                        },
                        permissions,
                        is_shared: false,
                    };

                    // Copy the contents of the original memory (including stack contents)
                    for i in 0..num_pages {
                        let src_page_addr = mmap.pmarea.start + i * PAGE_SIZE;
                        let dst_page_addr = new_mmap.pmarea.start + i * PAGE_SIZE;
                        unsafe {
                            core::ptr::copy_nonoverlapping(
                                src_page_addr as *const u8,
                                dst_page_addr as *mut u8,
                                PAGE_SIZE
                            );
                        }
                        // Manage the new pages in the child task
                        child.add_managed_page(ManagedPage {
                            vaddr: new_mmap.vmarea.start + i * PAGE_SIZE,
                            page: unsafe { Box::from_raw(pages.wrapping_add(i)) },
                        });
                    }
                    // Add the new memory map to the child task
                    child.vm_manager.add_memory_map(new_mmap)
                        .map_err(|_| "Failed to add memory map to child task")?;
                }
            }
        }
        // Copy register states
        child.vcpu.regs = self.vcpu.regs.clone();

        // Copy segment sizes and limits
        child.stack_size = self.stack_size;
        child.data_size = self.data_size;
        child.text_size = self.text_size;
        child.max_stack_size = self.max_stack_size;
        child.max_data_size = self.max_data_size;
        child.max_text_size = self.max_text_size;

        // Set the same entry point and PC
        child.entry = self.entry;
        child.vcpu.set_pc(self.vcpu.get_pc());

        // Copy file descriptors
        child.fd_table = self.fd_table.clone();
        child.files = self.files.clone();

        // Clone the vfs manager pointer (shares the namespace via Arc)
        child.vfs = self.vfs.clone();

        // Copy the current working directory
        child.cwd = self.cwd.clone();

        // Copy the task state from the parent
        child.state = self.state;

        // Set parent-child relationship
        child.set_parent_id(self.id);
        self.add_child(child.get_id());

        Ok(child)
    }

    /// Exit the task
    ///
    /// If the task has a living parent, it becomes a `Zombie` so the parent
    /// can collect its exit status via [`Task::wait`]; otherwise it is
    /// terminated immediately.
    ///
    /// # Arguments
    /// * `status` - The exit status
    ///
    pub fn exit(&mut self, status: i32) {
        match self.parent_id {
            Some(parent_id) => {
                if get_scheduler().get_task_by_id(parent_id).is_none() {
                    self.state = TaskState::Terminated;
                    return;
                }
                /* Set the exit status */
                self.set_exit_status(status);
                self.state = TaskState::Zombie;
            },
            None => {
                /* If the task has no parent, it is terminated */
                self.state = TaskState::Terminated;
            }
        }
    }

    /// Wait for a child task to exit and collect its status
    ///
    /// # Arguments
    /// * `child_id` - The ID of the child task to wait for
    ///
    /// # Returns
    /// The exit status of the child task, or an error if the child is not found or not in Zombie state
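    ///
    /// # Example
    ///
    /// A minimal exit/wait sketch (assumes `child_id` belongs to a child
    /// that has already called `exit` and is a zombie):
    ///
    /// ```ignore
    /// child.exit(0);                       // child becomes a Zombie
    /// let status = parent.wait(child_id)?; // reaps the child, yields 0
    /// ```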
    pub fn wait(&mut self, child_id: usize) -> Result<i32, WaitError> {
        if !self.children.contains(&child_id) {
            return Err(WaitError::NoSuchChild("No such child task".to_string()));
        }

        if let Some(child_task) = get_scheduler().get_task_by_id(child_id) {
            if child_task.get_state() == TaskState::Zombie {
                let status = child_task.get_exit_status().unwrap_or(-1);
                child_task.set_state(TaskState::Terminated);
                self.remove_child(child_id);
                Ok(status)
            } else {
                Err(WaitError::ChildNotExited("Child has not exited or is not a zombie".to_string()))
            }
        } else {
            Err(WaitError::ChildTaskNotFound("Child task not found".to_string()))
        }
    }
}

pub enum WaitError {
    NoSuchChild(String),
    ChildNotExited(String),
    ChildTaskNotFound(String),
}

impl WaitError {
    pub fn message(&self) -> &str {
        match self {
            WaitError::NoSuchChild(msg) => msg,
            WaitError::ChildNotExited(msg) => msg,
            WaitError::ChildTaskNotFound(msg) => msg,
        }
    }
}

/// Create a new kernel task.
///
/// # Arguments
/// * `name` - The name of the task
/// * `priority` - The priority of the task
/// * `func` - The function to run in the task
///
/// # Returns
/// The new task.
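///
/// # Example
///
/// A minimal sketch:
///
/// ```ignore
/// fn idle() { loop {} }
/// let task = new_kernel_task("idle".to_string(), 0, idle);
/// ```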
pub fn new_kernel_task(name: String, priority: u32, func: fn()) -> Task {
    let mut task = Task::new(name, priority, TaskType::Kernel);
    task.entry = func as usize;
    task
}

/// Create a new user task.
///
/// # Arguments
/// * `name` - The name of the task
/// * `priority` - The priority of the task
///
/// # Returns
/// The new task.
pub fn new_user_task(name: String, priority: u32) -> Task {
    Task::new(name, priority, TaskType::User)
}

/// Get the current task.
///
/// # Returns
/// The current task if it exists.
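///
/// # Example
///
/// A minimal sketch:
///
/// ```ignore
/// if let Some(task) = mytask() {
///     // operate on the task running on the current CPU
/// }
/// ```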
pub fn mytask() -> Option<&'static mut Task> {
    let cpu = get_cpu();
    get_scheduler().get_current_task(cpu.get_cpuid())
}

#[cfg(test)]
mod tests {
    use alloc::string::ToString;

    #[test_case]
    fn test_set_brk() {
        let mut task = super::new_user_task("Task0".to_string(), 0);
        task.init();
        assert_eq!(task.get_brk(), 0);
        task.set_brk(0x1000).unwrap();
        assert_eq!(task.get_brk(), 0x1000);
        task.set_brk(0x2000).unwrap();
        assert_eq!(task.get_brk(), 0x2000);
        task.set_brk(0x1008).unwrap();
        assert_eq!(task.get_brk(), 0x1008);
        task.set_brk(0x1000).unwrap();
        assert_eq!(task.get_brk(), 0x1000);
    }

    #[test_case]
    fn test_task_parent_child_relationship() {
        let mut parent_task = super::new_user_task("ParentTask".to_string(), 0);
        parent_task.init();

        let mut child_task = super::new_user_task("ChildTask".to_string(), 0);
        child_task.init();

        // Set parent-child relationship
        child_task.set_parent_id(parent_task.get_id());
        parent_task.add_child(child_task.get_id());

        // Verify parent-child relationship
        assert_eq!(child_task.get_parent_id(), Some(parent_task.get_id()));
        assert!(parent_task.get_children().contains(&child_task.get_id()));

        // Remove child and verify
        assert!(parent_task.remove_child(child_task.get_id()));
        assert!(!parent_task.get_children().contains(&child_task.get_id()));
    }

    #[test_case]
    fn test_task_exit_status() {
        let mut task = super::new_user_task("TaskWithExitStatus".to_string(), 0);
        task.init();

        // Verify initial exit status is None
        assert_eq!(task.get_exit_status(), None);

        // Set and verify exit status
        task.set_exit_status(0);
        assert_eq!(task.get_exit_status(), Some(0));

        task.set_exit_status(1);
        assert_eq!(task.get_exit_status(), Some(1));
    }

    #[test_case]
    fn test_clone_task_memory_copy() {
        let mut parent_task = super::new_user_task("ParentTask".to_string(), 0);
        parent_task.init();

        // Allocate some memory pages for the parent task
        let vaddr = 0x1000;
        let num_pages = 2;
        let mmap = parent_task.allocate_data_pages(vaddr, num_pages).unwrap();

        // Write test data to parent's memory
        let test_data: [u8; 8] = [0x12, 0x34, 0x56, 0x78, 0x9A, 0xBC, 0xDE, 0xF0];
        unsafe {
            let dst_ptr = mmap.pmarea.start as *mut u8;
            core::ptr::copy_nonoverlapping(test_data.as_ptr(), dst_ptr, test_data.len());
        }

        // Get parent memory map count before cloning
        let parent_memmap_count = parent_task.vm_manager.get_memmap().len();
        let parent_id = parent_task.get_id();

        // Clone the parent task
        let child_task = parent_task.clone_task().unwrap();

        // Get child memory map count after cloning
        let child_memmap_count = child_task.vm_manager.get_memmap().len();

        // Verify that the number of memory maps is identical
        assert_eq!(child_memmap_count, parent_memmap_count,
            "Child should have the same number of memory maps as parent: child={}, parent={}",
            child_memmap_count, parent_memmap_count);

        // Verify parent-child relationship was established
        assert_eq!(child_task.get_parent_id(), Some(parent_id));
        assert!(parent_task.get_children().contains(&child_task.get_id()));

        // Verify memory sizes were copied
        assert_eq!(child_task.stack_size, parent_task.stack_size);
        assert_eq!(child_task.data_size, parent_task.data_size);
        assert_eq!(child_task.text_size, parent_task.text_size);

        // Find the corresponding memory map in child that matches our test allocation
        let child_memmaps = child_task.vm_manager.get_memmap();
        let child_mmap = child_memmaps.iter()
            .find(|mmap| mmap.vmarea.start == vaddr && mmap.vmarea.end == vaddr + num_pages * crate::environment::PAGE_SIZE - 1)
            .expect("Test memory map not found in child task");

        // Verify that our specific memory region exists in both parent and child
        let parent_memmaps = parent_task.vm_manager.get_memmap();
        let parent_test_mmap = parent_memmaps.iter()
            .find(|mmap| mmap.vmarea.start == vaddr && mmap.vmarea.end == vaddr + num_pages * crate::environment::PAGE_SIZE - 1)
            .expect("Test memory map not found in parent task");

        // Verify the virtual memory ranges match
        assert_eq!(child_mmap.vmarea.start, parent_test_mmap.vmarea.start);
        assert_eq!(child_mmap.vmarea.end, parent_test_mmap.vmarea.end);
        assert_eq!(child_mmap.permissions, parent_test_mmap.permissions);

        // Verify the data was copied correctly
        unsafe {
            let parent_ptr = mmap.pmarea.start as *const u8;
            let child_ptr = child_mmap.pmarea.start as *const u8;

            // Check that physical addresses are different (separate memory)
            assert_ne!(parent_ptr, child_ptr, "Parent and child should have different physical memory");

            // Check that the data content is identical
            for i in 0..test_data.len() {
                let parent_byte = *parent_ptr.offset(i as isize);
                let child_byte = *child_ptr.offset(i as isize);
                assert_eq!(parent_byte, child_byte, "Data mismatch at offset {}", i);
            }
        }

        // Verify that modifying parent's memory doesn't affect child's memory
        unsafe {
            let parent_ptr = mmap.pmarea.start as *mut u8;
            let original_value = *parent_ptr;
            *parent_ptr = 0xFF; // Modify first byte in parent

            let child_ptr = child_mmap.pmarea.start as *const u8;
            let child_first_byte = *child_ptr;

            // Child's first byte should still be the original value
            assert_eq!(child_first_byte, original_value, "Child memory should be independent from parent");
        }

        // Verify register states were copied
        assert_eq!(child_task.vcpu.get_pc(), parent_task.vcpu.get_pc());

        // Verify entry point was copied
        assert_eq!(child_task.entry, parent_task.entry);

        // Verify state was copied
        assert_eq!(child_task.state, parent_task.state);

        // Verify that both tasks have the correct number of managed pages
        assert!(child_task.managed_pages.len() >= num_pages,
            "Child should have at least the test pages in managed pages");
    }

    #[test_case]
    fn test_clone_task_stack_copy() {
        let mut parent_task = super::new_user_task("ParentWithStack".to_string(), 0);
        parent_task.init();

        // Find the stack memory map in parent
        let stack_mmap = parent_task.vm_manager.get_memmap().iter()
            .find(|mmap| {
                // Stack should be near USER_STACK_TOP and have stack permissions
                use crate::vm::vmem::VirtualMemoryRegion;
                mmap.vmarea.end == crate::environment::USER_STACK_TOP - 1 &&
                mmap.permissions == VirtualMemoryRegion::Stack.default_permissions()
            })
            .expect("Stack memory map not found in parent task")
            .clone();

        // Write test data to parent's stack
        let stack_test_data: [u8; 16] = [
            0xAA, 0xBB, 0xCC, 0xDD, 0xEE, 0xFF, 0x11, 0x22,
            0x33, 0x44, 0x55, 0x66, 0x77, 0x88, 0x99, 0x00
        ];
        unsafe {
            let stack_ptr = (stack_mmap.pmarea.start + crate::environment::PAGE_SIZE) as *mut u8;
            core::ptr::copy_nonoverlapping(stack_test_data.as_ptr(), stack_ptr, stack_test_data.len());
        }

        // Clone the parent task
        let child_task = parent_task.clone_task().unwrap();

        // Find the corresponding stack memory map in child
        let child_stack_mmap = child_task.vm_manager.get_memmap().iter()
            .find(|mmap| {
                use crate::vm::vmem::VirtualMemoryRegion;
                mmap.vmarea.start == stack_mmap.vmarea.start &&
                mmap.vmarea.end == stack_mmap.vmarea.end &&
                mmap.permissions == VirtualMemoryRegion::Stack.default_permissions()
            })
            .expect("Stack memory map not found in child task");

        // Verify that stack content was copied correctly
        unsafe {
            let parent_stack_ptr = (stack_mmap.pmarea.start + crate::environment::PAGE_SIZE) as *const u8;
            let child_stack_ptr = (child_stack_mmap.pmarea.start + crate::environment::PAGE_SIZE) as *const u8;

            // Check that physical addresses are different (separate memory)
            assert_ne!(parent_stack_ptr, child_stack_ptr,
                "Parent and child should have different stack physical memory");

            // Check that the stack data content is identical
            for i in 0..stack_test_data.len() {
                let parent_byte = *parent_stack_ptr.offset(i as isize);
                let child_byte = *child_stack_ptr.offset(i as isize);
                assert_eq!(parent_byte, child_byte,
                    "Stack data mismatch at offset {}: parent={:#x}, child={:#x}",
                    i, parent_byte, child_byte);
            }
        }

        // Verify that modifying parent's stack doesn't affect child's stack
        unsafe {
            let parent_stack_ptr = (stack_mmap.pmarea.start + crate::environment::PAGE_SIZE) as *mut u8;
            let original_value = *parent_stack_ptr;
            *parent_stack_ptr = 0xFE; // Modify first byte in parent stack

            let child_stack_ptr = (child_stack_mmap.pmarea.start + crate::environment::PAGE_SIZE) as *const u8;
            let child_first_byte = *child_stack_ptr;

            // Child's first byte should still be the original value
            assert_eq!(child_first_byte, original_value,
                "Child stack should be independent from parent stack");
        }

        // Verify stack sizes match
        assert_eq!(child_task.stack_size, parent_task.stack_size,
            "Child and parent should have the same stack size");
    }

    #[test_case]
    fn test_clone_task_shared_memory() {
        use crate::vm::vmem::{VirtualMemoryMap, MemoryArea, VirtualMemoryPermission};
        use crate::mem::page::allocate_raw_pages;
        use crate::environment::PAGE_SIZE;

        let mut parent_task = super::new_user_task("ParentWithShared".to_string(), 0);
        parent_task.init();

        // Manually add a shared memory region to test sharing behavior
        let shared_vaddr = 0x5000;
        let num_pages = 1;
        let pages = allocate_raw_pages(num_pages);
        let paddr = pages as usize;

        let shared_mmap = VirtualMemoryMap {
            pmarea: MemoryArea {
                start: paddr,
                end: paddr + PAGE_SIZE - 1,
            },
            vmarea: MemoryArea {
                start: shared_vaddr,
                end: shared_vaddr + PAGE_SIZE - 1,
            },
            permissions: VirtualMemoryPermission::Read as usize | VirtualMemoryPermission::Write as usize,
            is_shared: true, // This should be shared between parent and child
        };

        // Add shared memory map to parent
        parent_task.vm_manager.add_memory_map(shared_mmap).unwrap();

        // Write test data to shared memory
        let test_data: [u8; 8] = [0xAA, 0xBB, 0xCC, 0xDD, 0xEE, 0xFF, 0x11, 0x22];
        unsafe {
            let shared_ptr = paddr as *mut u8;
            core::ptr::copy_nonoverlapping(test_data.as_ptr(), shared_ptr, test_data.len());
        }

        // Clone the parent task
        let child_task = parent_task.clone_task().unwrap();

        // Find the shared memory map in child
        let child_shared_mmap = child_task.vm_manager.get_memmap().iter()
            .find(|mmap| mmap.vmarea.start == shared_vaddr && mmap.is_shared)
            .expect("Shared memory map not found in child task");

        // Verify that the physical addresses are the same (shared memory)
        assert_eq!(child_shared_mmap.pmarea.start, shared_mmap.pmarea.start,
            "Shared memory should have the same physical address in parent and child");

        // Verify that the virtual addresses are the same
        assert_eq!(child_shared_mmap.vmarea.start, shared_mmap.vmarea.start);
        assert_eq!(child_shared_mmap.vmarea.end, shared_mmap.vmarea.end);

        // Verify that the is_shared flag is preserved
        assert!(child_shared_mmap.is_shared, "Shared memory should remain marked as shared");

        // Verify that modifying shared memory from the child affects the parent
        unsafe {
            let child_shared_ptr = child_shared_mmap.pmarea.start as *mut u8;
            let original_value = *child_shared_ptr;
            *child_shared_ptr = 0xFF; // Modify first byte through child reference

            let parent_shared_ptr = shared_mmap.pmarea.start as *const u8;
            let parent_first_byte = *parent_shared_ptr;

            // Parent should see the change made by child (shared memory)
            assert_eq!(parent_first_byte, 0xFF,
                "Parent should see changes made through child's shared memory reference");

            // Restore original value
            *child_shared_ptr = original_value;
        }

        // Verify that the shared data content is accessible from both
        unsafe {
            let child_ptr = child_shared_mmap.pmarea.start as *const u8;
            let parent_ptr = shared_mmap.pmarea.start as *const u8;

            // Check that the data content is identical and accessible from both
            for i in 0..test_data.len() {
                let parent_byte = *parent_ptr.offset(i as isize);
                let child_byte = *child_ptr.offset(i as isize);
                assert_eq!(parent_byte, child_byte,
                    "Shared memory data should be identical from both parent and child views");
            }
        }
    }
}