// kernel/arch/riscv64/vm/mmu/sv48.rs
use core::{arch::asm, mem::transmute};
use core::result::Result;


use crate::arch::vm::new_raw_pagetable;
use crate::environment::PAGE_SIZE;
use crate::vm::vmem::VirtualMemoryPermission;
use crate::{vm::vmem::VirtualMemoryMap};
9
/// Highest page-table level in Sv48: levels are 0..=3 and the root table
/// sits at level 3 (four-level translation).
const MAX_PAGING_LEVEL: usize = 3;
11
/// A single Sv48 page-table entry (PTE).
///
/// Bit layout (RISC-V privileged spec):
///   bits  0..=9  : flags — V, R, W, X, U, G, A, D, RSW[1:0]
///   bits 10..=53 : 44-bit physical page number (PPN)
#[repr(align(8))]
#[derive(Clone, Copy, Debug)]
pub struct PageTableEntry {
    pub entry: u64,
}

impl PageTableEntry {
    /// Bits 0..=9: the complete flag field, including the RSW software bits.
    const FLAG_MASK: u64 = 0x3ff;
    /// 44-bit PPN field value mask (applied before shifting to bit 10).
    const PPN_MASK: u64 = 0xFFF_FFFF_FFFF;

    /// An all-zero (invalid) entry.
    pub const fn new() -> Self {
        PageTableEntry { entry: 0 }
    }

    /// Returns the physical page number (bits 10..=53).
    pub fn get_ppn(&self) -> usize {
        (self.entry >> 10) as usize
    }

    /// Returns the flag field (bits 0..=9).
    ///
    /// Bug fix: the previous mask (0xfff) also captured bits 10 and 11,
    /// which belong to the PPN, so entries whose PPN had either low bit
    /// set reported phantom flags.
    pub fn get_flags(&self) -> u64 {
        self.entry & Self::FLAG_MASK
    }

    /// True when the V (valid) bit is set.
    pub fn is_valid(&self) -> bool {
        self.entry & 1 == 1
    }

    /// A PTE is a leaf when any of R/W/X (bits 1..=3) is set; a valid
    /// entry with all three clear points to a next-level table instead.
    pub fn is_leaf(&self) -> bool {
        self.entry & 0b1110 != 0
    }

    /// Sets the V bit.
    pub fn validate(&mut self) {
        self.entry |= 1;
    }

    /// Clears the V bit; all other fields are preserved.
    pub fn invalidate(&mut self) {
        self.entry &= !1;
    }

    /// Replaces the 44-bit PPN field, leaving the flags intact.
    /// `ppn` is masked to 44 bits so an oversized value cannot spill into
    /// the reserved high bits of the entry.
    pub fn set_ppn(&mut self, ppn: usize) -> &mut Self {
        self.entry &= !(Self::PPN_MASK << 10);
        self.entry |= ((ppn as u64) & Self::PPN_MASK) << 10;
        self
    }

    /// ORs `flags` into the low 8 flag bits (V..D).
    /// NOTE(review): the RSW bits (8..=9) are not settable through this
    /// method (pre-existing behavior, kept for compatibility) — confirm
    /// that is intended.
    pub fn set_flags(&mut self, flags: u64) -> &mut Self {
        self.entry |= flags & 0xff;
        self
    }

    /// Clears the low 8 flag bits (V..D); RSW bits are left untouched,
    /// mirroring `set_flags`.
    pub fn clear_flags(&mut self) -> &mut Self {
        self.entry &= !0xff;
        self
    }

    /// Sets W (bit 2).
    pub fn writable(&mut self) -> &mut Self {
        self.entry |= 0x4;
        self
    }

    /// Sets R (bit 1).
    pub fn readable(&mut self) -> &mut Self {
        self.entry |= 0x2;
        self
    }

    /// Sets X (bit 3).
    pub fn executable(&mut self) -> &mut Self {
        self.entry |= 0x8;
        self
    }

    /// Sets U (bit 4), making the page accessible from U-mode.
    /// (Existing spelling kept so callers do not break.)
    pub fn accesible_from_user(&mut self) -> &mut Self {
        self.entry |= 0x10;
        self
    }
}
86
/// One Sv48 page table: 512 eight-byte entries, exactly 4 KiB.
/// The 4096-byte alignment is required so the table's address can be
/// converted to a PPN by a plain shift (see `get_val_for_satp`).
#[repr(align(4096))]
#[derive(Debug)]
pub struct PageTable {
    pub entries: [PageTableEntry; 512],
}
92
impl PageTable {
    /// An empty table: all 512 entries zeroed (invalid).
    pub const fn new() -> Self {
        PageTable {
            entries: [PageTableEntry::new(); 512],
        }
    }

    /// Activates this root table by writing satp, then flushes the TLB
    /// with a full `sfence.vma`.
    pub fn switch(&self, asid: u16) {
        let satp = self.get_val_for_satp(asid);
        unsafe {
            asm!(
                "
                csrw satp, {0}
                sfence.vma
                ",

                in(reg) satp,
            );
        }
    }

    /// Encodes the satp value for this root table:
    /// MODE (9 = Sv48) in bits 60..=63, ASID in bits 44..=59, and this
    /// table's PPN in the low bits. Relies on `self` being 4 KiB aligned
    /// (guaranteed by `#[repr(align(4096))]` on PageTable).
    pub fn get_val_for_satp(&self, asid: u16) -> u64 {
        let asid = asid as usize;
        let mode = 9;
        let ppn = self as *const _ as usize >> 12;
        (mode << 60 | asid << 44 | ppn) as u64
    }

    /// Follows entry `index` down to the table it points to.
    /// NOTE(review): `transmute` fabricates a `&mut PageTable` from a bare
    /// address obtained through `&self` — this assumes page tables are
    /// reachable at their physical address (identity mapping) and sidesteps
    /// aliasing rules; both points deserve an audit.
    fn get_next_level_table(&self, index: usize) -> &mut PageTable {
        let addr = self.entries[index].get_ppn() << 12;
        unsafe { transmute(addr) }
    }

    /// Maps the virtual range of `mmap` onto its physical range page by
    /// page, applying `mmap.permissions` to every page.
    ///
    /// Returns Err if any of the four range boundaries is not aligned to
    /// PAGE_SIZE. NOTE(review): if an address increment would overflow,
    /// the loop breaks early yet `Ok(())` is still returned, so a partial
    /// mapping can be reported as success — confirm this is intended.
    pub fn map_memory_area(&mut self, asid: u16, mmap: VirtualMemoryMap) -> Result<(), &'static str> {
        if mmap.vmarea.start % PAGE_SIZE != 0 || mmap.pmarea.start % PAGE_SIZE != 0 ||
            mmap.vmarea.size() % PAGE_SIZE != 0 || mmap.pmarea.size() % PAGE_SIZE != 0 {
            return Err("Address is not aligned to PAGE_SIZE");
        }

        let mut vaddr = mmap.vmarea.start;
        let mut paddr = mmap.pmarea.start;
        // One page per iteration while a full page still fits before the
        // (inclusive) end of the virtual area.
        while vaddr + (PAGE_SIZE - 1) <= mmap.vmarea.end {
            self.map(asid, vaddr, paddr, mmap.permissions);
            match vaddr.checked_add(PAGE_SIZE) {
                Some(addr) => vaddr = addr,
                None => break,
            }
            match paddr.checked_add(PAGE_SIZE) {
                Some(addr) => paddr = addr,
                None => break,
            }
        }

        Ok(())
    }

    /// Maps a single 4 KiB page `vaddr` -> `paddr` with `permissions`,
    /// allocating intermediate tables on demand.
    ///
    /// Each iteration either completes the mapping (walk found the slot)
    /// or creates exactly one missing intermediate table. Invariant: on
    /// iteration `i`, a failed walk stopped at level `i`, which is why the
    /// Err arm derives the VPN from `i` rather than from walk's output.
    pub fn map(&mut self, asid: u16, vaddr: usize, paddr: usize, permissions: usize) {
        // Strip the page-offset bits; only page numbers matter below.
        let vaddr = vaddr & 0xffff_ffff_ffff_f000;
        let paddr = paddr & 0xffff_ffff_ffff_f000;
        for i in (0..=MAX_PAGING_LEVEL).rev() {
            let pagetable = self.walk(vaddr, MAX_PAGING_LEVEL);
            match pagetable {
                Ok((pagetable, level)) => {
                    // Found the table that holds the final (leaf) entry.
                    let vpn = (vaddr >> (12 + 9 * level)) & 0x1ff;
                    let ppn = (paddr >> 12) & 0xfffffffffff;
                    let entry = &mut pagetable.entries[vpn];
                    if VirtualMemoryPermission::Read.contained_in(permissions) {
                        entry.readable();
                    }
                    if VirtualMemoryPermission::Write.contained_in(permissions) {
                        entry.writable();
                    }
                    if VirtualMemoryPermission::Execute.contained_in(permissions) {
                        entry.executable();
                    }
                    if VirtualMemoryPermission::User.contained_in(permissions) {
                        entry.accesible_from_user();
                    }
                    entry
                        .set_ppn(ppn)
                        .validate();
                    // Flush any stale translation for this page.
                    unsafe { asm!("sfence.vma") };
                    break;
                }
                Err(t) => {
                    // Walk hit an invalid entry: allocate the next-level
                    // table, hook it in, and retry the walk next iteration.
                    let vpn = vaddr >> (12 + 9 * i) & 0x1ff;
                    let entry = &mut t.entries[vpn];
                    let next_table_ptr = unsafe { new_raw_pagetable(asid as u16) };
                    entry
                        .set_ppn(next_table_ptr as usize >> 12)
                        .validate();
                }
            }
        }
    }

    /// Descends from this table toward level 0 for `vaddr`.
    ///
    /// Ok((table, level)): `table` is where the mapping entry lives —
    /// either an existing leaf was found or level 0 was reached.
    /// Err(table): the walk stopped at an invalid (non-present) entry in
    /// `table`; the caller must allocate the missing next level.
    fn walk(&mut self, vaddr: usize, level: usize) -> Result<(&mut PageTable, usize), &mut PageTable> {
        let vpn = (vaddr >> (12 + 9 * level)) & 0x1ff;
        let entry = &self.entries[vpn];

        if entry.is_leaf() || level == 0 {
            return Ok((self, level));
        }

        if !entry.is_valid() {
            return Err(self);
        }

        let next_level_table = self.get_next_level_table(vpn);
        next_level_table.walk(vaddr, level - 1)
    }

    /// Invalidates the entry mapping `vaddr` (if any) and flushes the
    /// TLB. An address with no mapping is silently ignored.
    pub fn unmap(&mut self, vaddr: usize) {
        let vaddr = vaddr & 0xffff_ffff_ffff_f000;
        let pagetable = self.walk(vaddr, MAX_PAGING_LEVEL);
        match pagetable {
            Ok((pagetable, level)) => {
                let vpn = (vaddr >> (12 + 9 * level)) & 0x1ff;
                let entry = &mut pagetable.entries[vpn];
                entry.invalidate();
                unsafe { asm!("sfence.vma") };
            }
            Err(_) => {}
        }
    }

    /// Invalidates every entry of this (root) table, then flushes the TLB.
    /// NOTE(review): lower-level tables are only unlinked, never freed —
    /// their backing memory stays allocated. Confirm an allocator-side
    /// reclaim exists, or this leaks on address-space teardown.
    pub fn unmap_all(&mut self) {
        for i in 0..512 {
            let entry = &mut self.entries[i];
            entry.invalidate();
        }
        unsafe { asm!("sfence.vma") };
    }
}