// kernel/arch/riscv64/vm/mmu/sv48.rs
use core::{arch::asm, mem::transmute};
2use core::result::Result;
3
4use crate::environment::PAGE_SIZE;
5use crate::vm::vmem::VirtualMemoryPermission;
6use crate::{arch::vm::{get_page_table, new_page_table_idx}, vm::vmem::VirtualMemoryMap};
7
/// Sv48 walks four levels of page tables, numbered 3 (root) down to 0.
const MAX_PAGING_LEVEL: usize = 3;
9
#[repr(align(8))]
#[derive(Clone, Copy)]
pub struct PageTableEntry {
    // Raw Sv48 PTE: bits 0-7 flags (V/R/W/X/U/G/A/D), bits 8-9 RSW,
    // bits 10-53 PPN.
    pub entry: u64,
}

impl PageTableEntry {
    /// Creates an all-zero (invalid) entry.
    pub const fn new() -> Self {
        PageTableEntry { entry: 0 }
    }

    /// Returns the physical page number stored in bits 10 and up.
    pub fn get_ppn(&self) -> usize {
        (self.entry >> 10) as usize
    }

    /// Returns the flag and RSW bits (bits 0-9).
    ///
    /// Fix: the mask was `0xfff`, which leaked the low two PPN bits
    /// (bits 10-11) into the result — the PPN field starts at bit 10,
    /// so only bits 0-9 belong to flags/RSW.
    pub fn get_flags(&self) -> u64 {
        self.entry & 0x3ff
    }

    /// True if the V (valid) bit is set.
    pub fn is_valid(&self) -> bool {
        self.entry & 1 == 1
    }

    /// True if any of R/W/X (bits 1-3) is set, marking a leaf entry;
    /// a non-leaf (pointer) entry has all three clear.
    /// Note: this deliberately ignores the V bit.
    pub fn is_leaf(&self) -> bool {
        let flags = self.entry & 0b1110;
        !(flags == 0)
    }

    /// Sets the V bit.
    pub fn validate(&mut self) {
        self.entry |= 1;
    }

    /// Clears the V bit; all other bits are left intact.
    pub fn invalidate(&mut self) {
        self.entry &= !1;
    }

    /// Replaces the 44-bit PPN field (bits 10-53).
    pub fn set_ppn(&mut self, ppn: usize) -> &mut Self {
        let mask = 0xFFFFFFFFFFF; // 44 bits: the Sv48 PPN width
        self.entry &= !(mask << 10);
        self.entry |= (ppn as u64) << 10;
        self
    }

    /// ORs the low 8 bits of `flags` into the entry; existing flags are kept.
    pub fn set_flags(&mut self, flags: u64) -> &mut Self {
        let mask = 0xff;
        self.entry |= flags & mask;
        self
    }

    /// Clears all 8 flag bits (V/R/W/X/U/G/A/D).
    pub fn clear_flags(&mut self) -> &mut Self {
        self.entry &= !0xff;
        self
    }

    /// Sets W (bit 2).
    pub fn writable(&mut self) -> &mut Self {
        self.entry |= 0x4;
        self
    }

    /// Sets R (bit 1).
    pub fn readable(&mut self) -> &mut Self {
        self.entry |= 0x2;
        self
    }

    /// Sets X (bit 3).
    pub fn executable(&mut self) -> &mut Self {
        self.entry |= 0x8;
        self
    }

    /// Sets U (bit 4). The existing (misspelled) public name is kept so
    /// callers elsewhere keep compiling.
    pub fn accesible_from_user(&mut self) -> &mut Self {
        self.entry |= 0x10;
        self
    }
}
84
/// One 4 KiB Sv48 page table: 512 eight-byte PTEs, page-aligned as the
/// hardware page-table walker requires.
#[repr(align(4096))]
pub struct PageTable {
    pub entries: [PageTableEntry; 512],
}
89
90impl PageTable {
91 pub const fn new() -> Self {
92 PageTable {
93 entries: [PageTableEntry::new(); 512],
94 }
95 }
96
97 pub fn switch(&self, asid: usize) {
98 let satp = self.get_val_for_satp(asid);
99 unsafe {
100 asm!(
101 "
102 csrw satp, {0}
103 sfence.vma
104 ",
105
106 in(reg) satp,
107 );
108 }
109 }
110
111 pub fn get_val_for_satp(&self, asid: usize) -> u64 {
117 let mode = 9;
118 let ppn = self as *const _ as usize >> 12;
119 (mode << 60 | asid << 44 | ppn) as u64
120 }
121
122 fn get_next_level_table(&self, index: usize) -> &mut PageTable {
123 let addr = self.entries[index].get_ppn() << 12;
124 unsafe { transmute(addr) }
125 }
126
127 pub fn map_memory_area(&mut self, mmap: VirtualMemoryMap) -> Result<(), &'static str> {
128 if mmap.vmarea.start % PAGE_SIZE != 0 || mmap.pmarea.start % PAGE_SIZE != 0 ||
130 mmap.vmarea.size() % PAGE_SIZE != 0 || mmap.pmarea.size() % PAGE_SIZE != 0 {
131 return Err("Address is not aligned to PAGE_SIZE");
132 }
133
134 let mut vaddr = mmap.vmarea.start;
135 let mut paddr = mmap.pmarea.start;
136 while vaddr + (PAGE_SIZE - 1) <= mmap.vmarea.end {
137 self.map(vaddr, paddr, mmap.permissions);
138 match vaddr.checked_add(PAGE_SIZE) {
139 Some(addr) => vaddr = addr,
140 None => break,
141 }
142 match paddr.checked_add(PAGE_SIZE) {
143 Some(addr) => paddr = addr,
144 None => break,
145 }
146 }
147
148 Ok(())
149 }
150
151 pub fn map(&mut self, vaddr: usize, paddr: usize, permissions: usize) {
153 let vaddr = vaddr & 0xffff_ffff_ffff_f000;
154 let paddr = paddr & 0xffff_ffff_ffff_f000;
155 for i in (0..=MAX_PAGING_LEVEL).rev() {
156 let pagetable = self.walk(vaddr, MAX_PAGING_LEVEL);
157 match pagetable {
158 Ok((pagetable, level)) => {
159 let vpn = (vaddr >> (12 + 9 * level)) & 0x1ff;
160 let ppn = (paddr >> 12) & 0xfffffffffff;
161 let entry = &mut pagetable.entries[vpn];
162 if VirtualMemoryPermission::Read.contained_in(permissions) {
163 entry.readable();
164 }
165 if VirtualMemoryPermission::Write.contained_in(permissions) {
166 entry.writable();
167 }
168 if VirtualMemoryPermission::Execute.contained_in(permissions) {
169 entry.executable();
170 }
171 if VirtualMemoryPermission::User.contained_in(permissions) {
172 entry.accesible_from_user();
173 }
174 entry
175 .set_ppn(ppn)
176 .validate();
177 unsafe { asm!("sfence.vma") };
178 break;
179 }
180 Err(t) => {
181 let vpn = vaddr >> (12 + 9 * i) & 0x1ff;
182 let entry = &mut t.entries[vpn];
183 let next_table_idx = new_page_table_idx();
184 let next_table = get_page_table(next_table_idx).unwrap();
185 entry
186 .set_ppn(next_table as *const _ as usize >> 12)
187 .validate();
188 }
189 }
190 }
191 }
192
193 fn walk(&mut self, vaddr: usize, level: usize) -> Result<(&mut PageTable, usize), &mut PageTable> {
194 let vpn = (vaddr >> (12 + 9 * level)) & 0x1ff;
195 let entry = &self.entries[vpn];
196
197 if entry.is_leaf() || level == 0 {
198 return Ok((self, level));
199 }
200
201 if !entry.is_valid() {
202 return Err(self);
203 }
204
205 let next_level_table = self.get_next_level_table(vpn);
206 next_level_table.walk(vaddr, level - 1)
207 }
208
209 pub fn unmap(&mut self, vaddr: usize) {
210 let vaddr = vaddr & 0xffff_ffff_ffff_f000;
211 let pagetable = self.walk(vaddr, MAX_PAGING_LEVEL);
212 match pagetable {
213 Ok((pagetable, level)) => {
214 let vpn = (vaddr >> (12 + 9 * level)) & 0x1ff;
215 let entry = &mut pagetable.entries[vpn];
216 entry.invalidate();
217 unsafe { asm!("sfence.vma") };
218 }
219 Err(_) => {}
220 }
221 }
222
223 pub fn unmap_all(&mut self) {
224 for i in 0..512 {
225 let entry = &mut self.entries[i];
226 entry.invalidate();
227 }
228 unsafe { asm!("sfence.vma") };
230 }
231}