diff --git a/xernel/kernel/src/mem/paging.rs b/xernel/kernel/src/mem/paging.rs
index d1c11266..d69d8163 100644
--- a/xernel/kernel/src/mem/paging.rs
+++ b/xernel/kernel/src/mem/paging.rs
@@ -293,6 +293,37 @@ impl Pagemap {
             (*pml1)[virt.p1_index()].set_unused();
         }
     }
+
+    /// Only works with 4KiB pages
+    pub fn translate(&self, virt: VirtAddr) -> Option<PhysAddr> {
+        let pml4 = self.page_table;
+
+        unsafe {
+            if !(*pml4)[virt.p4_index()].flags().contains(PageTableFlags::PRESENT) {
+                return None;
+            }
+
+            let pml3 = (*pml4)[virt.p4_index()].addr().as_u64() as *mut PageTable;
+
+            if !(*pml3)[virt.p3_index()].flags().contains(PageTableFlags::PRESENT) {
+                return None;
+            }
+
+            let pml2 = (*pml3)[virt.p3_index()].addr().as_u64() as *mut PageTable;
+
+            if !(*pml2)[virt.p2_index()].flags().contains(PageTableFlags::PRESENT) {
+                return None;
+            }
+
+            let pml1 = (*pml2)[virt.p2_index()].addr().as_u64() as *mut PageTable;
+
+            if !(*pml1)[virt.p1_index()].flags().contains(PageTableFlags::PRESENT) {
+                return None;
+            }
+
+            Some((*pml1)[virt.p1_index()].addr() + u64::from(virt.page_offset()))
+        }
+    }
 }
 
 impl Drop for Pagemap {
diff --git a/xernel/kernel/src/mem/vm.rs b/xernel/kernel/src/mem/vm.rs
index 2652d476..a036a977 100644
--- a/xernel/kernel/src/mem/vm.rs
+++ b/xernel/kernel/src/mem/vm.rs
@@ -1,13 +1,15 @@
 use alloc::collections::BTreeMap;
 use libxernel::syscall::{MapFlags, ProtectionFlags};
-use x86_64::structures::paging::PageTableFlags;
+use x86_64::structures::paging::{PageTableFlags, PhysFrame};
 use x86_64::{
     structures::paging::{PageSize, Size4KiB},
     VirtAddr,
 };
 
 use crate::mem::PROCESS_END;
+use crate::sched::scheduler::Scheduler;
 
+use super::frame::FRAME_ALLOCATOR;
 use super::{PROCESS_START, STACK_SIZE};
 
 pub struct VmEntry {
@@ -23,6 +25,25 @@ impl VmEntry {
     pub fn end(&self) -> VirtAddr {
         self.start + self.length
     }
+
+    pub fn unmap(&self) {
+        let process = Scheduler::current_process();
+        let process = process.lock();
+
+        // SAFETY: only userspace processes should have Vm mappings
+        let mut page_mapper = process.get_page_table().unwrap();
+        let mut frame_allocator = FRAME_ALLOCATOR.lock();
+
+        for page in (self.start..self.end()).step_by(Size4KiB::SIZE as usize) {
+            if let Some(phys_addr) = page_mapper.translate(page) {
+                unsafe {
+                    frame_allocator.deallocate_frame(PhysFrame::<Size4KiB>::containing_address(phys_addr));
+                }
+            }
+
+            page_mapper.unmap(page);
+        }
+    }
 }
 
 pub struct Vm {
@@ -64,7 +85,7 @@ impl Vm {
         let mut start_address = VirtAddr::new(PROCESS_END - length as u64);
 
         loop {
-            if self.is_available(start_address, length) { 
+            if self.is_available(start_address, length) {
                 if start_address.as_u64() < PROCESS_START {
                     panic!(
                         "create_entry_high: {:x}(length = {}) is out of bounds",
@@ -159,8 +180,8 @@ impl Vm {
     }
 
     pub fn clean_up(&mut self) {
-        todo!("clean up all mappings and free memory")
-        // NOTE: don't forget to remove the entries from the vector
+        self.entries.values().for_each(|value| value.unmap());
+        self.entries.clear();
     }
 }
 