diff --git a/xernel/kernel/src/main.rs b/xernel/kernel/src/main.rs
index 7aacd695..33095f27 100644
--- a/xernel/kernel/src/main.rs
+++ b/xernel/kernel/src/main.rs
@@ -170,7 +170,8 @@ extern "C" fn kernel_main() -> ! {
         PageTableFlags::WRITABLE | PageTableFlags::USER_ACCESSIBLE | PageTableFlags::PRESENT,
         true,
     );
-    process.lock().get_page_table().unwrap().map(
+    let mut pm = process.lock().get_page_table().unwrap();
+    pm.map(
         page,
         Page::from_start_address(VirtAddr::new(0x200000)).unwrap(),
         PageTableFlags::WRITABLE | PageTableFlags::USER_ACCESSIBLE | PageTableFlags::PRESENT,
diff --git a/xernel/kernel/src/mem/mmap.rs b/xernel/kernel/src/mem/mmap.rs
index 02540c6c..fb72f308 100644
--- a/xernel/kernel/src/mem/mmap.rs
+++ b/xernel/kernel/src/mem/mmap.rs
@@ -30,8 +30,7 @@ pub fn mmap(
 
     match flags {
         MapFlags::ANONYMOUS => {
-            let start_address = process.vm().find_next_start_address();
-            process.vm().add_entry(start_address, len, prot, flags);
+            let start_address = process.vm().create_entry_at(addr, len, prot, flags);
 
             Ok(start_address.as_u64() as isize)
         }
@@ -60,7 +59,7 @@ pub fn handle_page_fault(addr: VirtAddr, error_code: PageFaultErrorCode) -> bool
 
     let base_addr = addr.align_down(Size4KiB::SIZE);
     let frame = FRAME_ALLOCATOR.lock().allocate_frame::<Size4KiB>().unwrap();
-    let pt_flags = ptflags_from_protflags(vm_entry.prot);
+    let pt_flags = ptflags_from_protflags(vm_entry.prot, process.page_table.is_some());
 
     let mut pt = process.get_page_table().unwrap();
     pt.map::<Size4KiB>(frame, Page::from_start_address(base_addr).unwrap(), pt_flags, true);
diff --git a/xernel/kernel/src/mem/mod.rs b/xernel/kernel/src/mem/mod.rs
index d8dc4ec2..b29f1abc 100644
--- a/xernel/kernel/src/mem/mod.rs
+++ b/xernel/kernel/src/mem/mod.rs
@@ -13,12 +13,12 @@ pub static HIGHER_HALF_OFFSET: Once<u64> = Once::new();
 pub const KERNEL_OFFSET: u64 = 0xffff_ffff_8000_0000;
 pub const HEAP_START_ADDR: usize = 0xffff_9000_0000_0000;
 
-// NOTE: there are 16 TB available for mmap until it overflows into the non-canonical address space
-pub const MMAP_START_ADDR: usize = 0x0000_7000_0000_0000;
 
 // NOTE: stack grows down
 pub const KERNEL_THREAD_STACK_TOP: u64 = 0xffff_a000_0000_0000;
-pub const USER_THREAD_STACK_TOP: u64 = 0x0000_ffff_ffff_f000;
+
+pub const PROCESS_START: u64 = 0x0000_0000_0040_0000;
+pub const PROCESS_END: u64 = 0x0000_ffff_ffff_f000;
 
 pub const STACK_SIZE: u64 = 0x40000;
 pub const FRAME_SIZE: u64 = 4096;
diff --git a/xernel/kernel/src/mem/paging.rs b/xernel/kernel/src/mem/paging.rs
index d1c11266..d40627d4 100644
--- a/xernel/kernel/src/mem/paging.rs
+++ b/xernel/kernel/src/mem/paging.rs
@@ -9,7 +9,7 @@ use limine::KernelAddressRequest;
 use x86_64::{
     align_down,
     registers::control::{Cr3, Cr3Flags},
-    structures::paging::{Page, PageSize, Size1GiB, Size2MiB, Size4KiB},
+    structures::paging::{Page, PageSize, PageTableIndex, Size1GiB, Size2MiB, Size4KiB},
 };
 use x86_64::{
     structures::paging::{PageTable, PageTableFlags, PhysFrame},
@@ -293,11 +293,83 @@ impl Pagemap {
             (*pml1)[virt.p1_index()].set_unused();
         }
     }
+
+    unsafe fn get_pt(pt: *mut PageTable, pt_index: PageTableIndex) -> *mut PageTable {
+        (*pt)[pt_index].addr().as_u64() as *mut PageTable
+    }
+
+    /// Only works with 4KiB pages
+    pub fn translate(&self, virt: VirtAddr) -> Option<PhysAddr> {
+        let pml4 = self.page_table;
+
+        unsafe {
+            if !(*pml4)[virt.p4_index()].flags().contains(PageTableFlags::PRESENT) {
+                return None;
+            }
+
+            let pml3 = Self::get_pt(pml4, virt.p4_index());
+
+            if !(*pml3)[virt.p3_index()].flags().contains(PageTableFlags::PRESENT) {
+                return None;
+            }
+
+            let pml2 = Self::get_pt(pml3, virt.p3_index());
+
+            if !(*pml2)[virt.p2_index()].flags().contains(PageTableFlags::PRESENT) {
+                return None;
+            }
+
+            let pml1 = Self::get_pt(pml2, virt.p2_index());
+
+            if !(*pml1)[virt.p1_index()].flags().contains(PageTableFlags::PRESENT) {
+                return None;
+            }
+
+            Some((*pml1)[virt.p1_index()].addr() + u64::from(virt.page_offset()))
+        }
+    }
+
+    fn deallocate_pt(pt: *mut PageTable, level: u8) {
+        // Walk the child tables first. At the top level only the lower (user-space)
+        // half of the PML4 is visited, so the kernel mappings shared with the
+        // kernel pagemap stay untouched.
+        let entries = if level == 4 { 0..256 } else { 0..512 };
+
+        if level > 1 {
+            for i in entries {
+                unsafe {
+                    // Huge-page entries reference data frames, not page tables,
+                    // so they must not be recursed into.
+                    if (*pt)[i].flags().contains(PageTableFlags::PRESENT)
+                        && !(*pt)[i].flags().contains(PageTableFlags::HUGE_PAGE)
+                    {
+                        let pt = ((*pt)[i].addr().as_u64() + *HIGHER_HALF_OFFSET) as *mut PageTable;
+
+                        Self::deallocate_pt(pt, level - 1);
+                    }
+                }
+            }
+        }
+
+        // Free the frame backing this table itself. The frame allocator is only
+        // locked here, after all recursion, so the spinlock is never taken re-entrantly.
+        unsafe {
+            FRAME_ALLOCATOR.lock().deallocate_frame(
+                PhysFrame::<Size4KiB>::from_start_address(PhysAddr::new(pt as u64 - *HIGHER_HALF_OFFSET)).unwrap(),
+            );
+        }
+    }
 }
 
 impl Drop for Pagemap {
     fn drop(&mut self) {
-        todo!("drop pagemap")
+        Self::deallocate_pt(self.page_table, 4);
     }
 }
diff --git a/xernel/kernel/src/mem/vm.rs b/xernel/kernel/src/mem/vm.rs
index f4691c9e..a036a977 100644
--- a/xernel/kernel/src/mem/vm.rs
+++ b/xernel/kernel/src/mem/vm.rs
@@ -1,17 +1,20 @@
-use alloc::vec::Vec;
+use alloc::collections::BTreeMap;
 use libxernel::syscall::{MapFlags, ProtectionFlags};
-use x86_64::align_up;
-use x86_64::structures::paging::PageTableFlags;
+use x86_64::structures::paging::{PageTableFlags, PhysFrame};
 use x86_64::{
     structures::paging::{PageSize, Size4KiB},
     VirtAddr,
 };
 
-use super::MMAP_START_ADDR;
+use crate::mem::PROCESS_END;
+use crate::sched::scheduler::Scheduler;
+
+use super::frame::FRAME_ALLOCATOR;
+use super::{PROCESS_START, STACK_SIZE};
 
 pub struct VmEntry {
-    start: VirtAddr,
-    length: usize,
+    pub start: VirtAddr,
+    pub length: usize,
     pub prot: ProtectionFlags,
     pub flags: MapFlags,
     // TODO: add something to represent to which file this entry belongs to
@@ -22,55 +25,175 @@ impl VmEntry {
     pub fn end(&self) -> VirtAddr {
         self.start + self.length
     }
+
+    pub fn unmap(&self) {
+        let process = Scheduler::current_process();
+        let process = process.lock();
+
+        // SAFETY: only userspace processes should have Vm mappings
+        let mut page_mapper = process.get_page_table().unwrap();
+        let mut frame_allocator = FRAME_ALLOCATOR.lock();
+
+        for page in (self.start..self.end()).step_by(Size4KiB::SIZE as usize) {
+            if let Some(phys_addr) = page_mapper.translate(page) {
+                unsafe {
+                    frame_allocator.deallocate_frame(PhysFrame::<Size4KiB>::containing_address(phys_addr));
+                }
+            }
+
+            page_mapper.unmap(page);
+        }
+    }
 }
 
 pub struct Vm {
-    entries: Vec<VmEntry>,
+    entries: BTreeMap<VirtAddr, VmEntry>,
}
 
 impl Vm {
     pub const fn new() -> Self {
-        Self { entries: Vec::new() }
+        Self {
+            entries: BTreeMap::new(),
+        }
     }
 
-    pub fn add_entry(&mut self, start: VirtAddr, length: usize, prot: ProtectionFlags, flags: MapFlags) {
-        self.entries.push(VmEntry {
+    fn add_entry(&mut self, start: VirtAddr, length: usize, prot: ProtectionFlags, flags: MapFlags) {
+        let entry = VmEntry {
             start,
             length,
             prot,
             flags,
             file: None,
-        });
+        };
+
+        self.entries.insert(start, entry);
+    }
+
+    /// Checks whether `start..start + length`, padded by one guard page on each side,
+    /// does not overlap any existing entry.
+    pub fn is_available(&self, start: VirtAddr, length: usize) -> bool {
+        !self.entries.iter().any(|(_, entry)| {
+            entry.start < start + length + Size4KiB::SIZE && entry.end() + Size4KiB::SIZE > start
+        })
+    }
+
+    pub fn create_entry_low(&mut self, length: usize, prot: ProtectionFlags, flags: MapFlags) -> VirtAddr {
+        self.create_entry_at(VirtAddr::new(PROCESS_START), length, prot, flags)
     }
 
-    pub fn find_next_start_address(&self) -> VirtAddr {
-        let last_entry = self.entries.last();
+    pub fn create_entry_high(&mut self, length: usize, prot: ProtectionFlags, flags: MapFlags) -> VirtAddr {
+        let mut start_address = VirtAddr::new(PROCESS_END - length as u64);
+
+        loop {
+            if self.is_available(start_address, length) {
+                if start_address.as_u64() < PROCESS_START {
+                    panic!(
+                        "create_entry_high: {:x}(length = {}) is out of bounds",
+                        start_address, length
+                    );
+                }
+
+                self.add_entry(start_address, length, prot, flags);
+                return start_address;
+            }
 
-        if let Some(last_entry) = last_entry {
-            VirtAddr::new(align_up(last_entry.end().as_u64(), Size4KiB::SIZE))
-        } else {
-            VirtAddr::new(MMAP_START_ADDR as u64)
+            // NOTE: at the moment only a stack should be created at the high end of the process address space
+            start_address -= STACK_SIZE;
         }
     }
 
+    /// Creates a new entry at the given address if that range is free,
+    /// otherwise in the next free range that is large enough.
+    pub fn create_entry_at(
+        &mut self,
+        mut start: VirtAddr,
+        length: usize,
+        prot: ProtectionFlags,
+        flags: MapFlags,
+    ) -> VirtAddr {
+        if start.as_u64() + length as u64 > PROCESS_END {
+            panic!("create_entry_at: {:x}(length = {}) is out of bounds", start, length);
+        }
+
+        if !start.is_aligned(Size4KiB::SIZE) {
+            panic!("create_entry_at: {:x} is not aligned", start);
+        }
+
+        if start.as_u64() < PROCESS_START {
+            start = VirtAddr::new(PROCESS_START);
+        }
+
+        if self.is_available(start, length) {
+            self.add_entry(start, length, prot, flags);
+            return start;
+        }
+
+        let mut values_iter = self.entries.values();
+        let mut previous = values_iter.next().unwrap();
+        let current = values_iter.next();
+
+        if current.is_none() {
+            let new_start = previous.end() + Size4KiB::SIZE;
+            let new_start = new_start.align_up(Size4KiB::SIZE);
+
+            self.add_entry(new_start, length, prot, flags);
+            return new_start;
+        }
+
+        let mut current = current.unwrap();
+
+        loop {
+            if current.start - previous.end() >= length as u64 + 2 * Size4KiB::SIZE {
+                let new_start = previous.end() + Size4KiB::SIZE;
+                let new_start = new_start.align_up(Size4KiB::SIZE);
+
+                self.add_entry(new_start, length, prot, flags);
+                return new_start;
+            }
+
+            previous = current;
+            let current_opt = values_iter.next();
+
+            if current_opt.is_none() {
+                let new_start = previous.end() + Size4KiB::SIZE;
+                let new_start = new_start.align_up(Size4KiB::SIZE);
+
+                if new_start.as_u64() + length as u64 > PROCESS_END {
+                    panic!(
+                        "create_entry_at: {:x}(length = {}) is out of bounds! Vm space is exhausted",
Vm space is exhausted", + new_start, length + ); + } + + self.add_entry(new_start, length, prot, flags); + return new_start; + } + + current = current_opt.unwrap(); } } pub fn get_entry_from_address(&self, addr: VirtAddr) -> Option<&VmEntry> { self.entries .iter() - .find(|entry| entry.start <= addr && entry.end() > addr) + .find(|(_, entry)| entry.start <= addr && entry.end() > addr) + .map(|(_, entry)| entry) } pub fn clean_up(&mut self) { - todo!("clean up all mappings and free memory") - // NOTE: don't forget to remove the entries from the vector + self.entries.values().for_each(|value| value.unmap()); + self.entries.clear(); } } -pub fn ptflags_from_protflags(flags: ProtectionFlags) -> PageTableFlags { +pub fn ptflags_from_protflags(flags: ProtectionFlags, user_accessible: bool) -> PageTableFlags { let mut new_flags = PageTableFlags::PRESENT; - if flags.contains(ProtectionFlags::READ) { - // TODO: how to handle this?? - todo!("PageTableFlags::READ") + if user_accessible { + new_flags |= PageTableFlags::USER_ACCESSIBLE; + } + + if !flags.contains(ProtectionFlags::READ) { + // NOTE: it is not possible to remove read access from a page } if flags.contains(ProtectionFlags::WRITE) { diff --git a/xernel/kernel/src/sched/process.rs b/xernel/kernel/src/sched/process.rs index 5e16880d..8b25c3fe 100644 --- a/xernel/kernel/src/sched/process.rs +++ b/xernel/kernel/src/sched/process.rs @@ -1,5 +1,6 @@ use alloc::sync::Weak; use core::sync::atomic::{AtomicUsize, Ordering}; +use libxernel::syscall::{MapFlags, ProtectionFlags}; use x86_64::structures::paging::{Page, PageSize, PageTableFlags, Size4KiB}; use x86_64::VirtAddr; @@ -7,7 +8,7 @@ use crate::fs::file::File; use crate::fs::vnode::VNode; use crate::mem::frame::FRAME_ALLOCATOR; use crate::mem::vm::Vm; -use crate::mem::{KERNEL_THREAD_STACK_TOP, STACK_SIZE, USER_THREAD_STACK_TOP}; +use crate::mem::{KERNEL_THREAD_STACK_TOP, STACK_SIZE}; use crate::VFS; use alloc::collections::BTreeMap; use alloc::sync::Arc; @@ -18,8 +19,6 @@ use libxernel::sync::{Once, Spinlock}; use crate::mem::paging::{Pagemap, KERNEL_PAGE_MAPPER}; use crate::sched::thread::Thread; -use libxernel::syscall::{MapFlags, ProtectionFlags}; - /// Ongoing counter for the ProcessID static PROCESS_ID_COUNTER: AtomicUsize = AtomicUsize::new(0); @@ -34,7 +33,6 @@ pub struct Process { pub threads: Vec>>, pub fds: BTreeMap, pub kernel_thread_stack_top: usize, - pub user_thread_stack_top: usize, pub thread_id_counter: usize, pub vm: Vm, pub cwd: Arc>, @@ -58,7 +56,6 @@ impl Process { threads: Vec::new(), fds: BTreeMap::new(), kernel_thread_stack_top: KERNEL_THREAD_STACK_TOP as usize, - user_thread_stack_top: USER_THREAD_STACK_TOP as usize, thread_id_counter: 0, vm: Vm::new(), cwd: VFS.lock().root_node(), @@ -85,23 +82,19 @@ impl Process { ); } - self.vm.add_entry( - VirtAddr::new(stack_bottom as u64), - STACK_SIZE as usize, - ProtectionFlags::READ | ProtectionFlags::WRITE, - MapFlags::ANONYMOUS, - ); - stack_top } pub fn new_user_stack(&mut self) -> usize { - let stack_top = self.user_thread_stack_top; - self.user_thread_stack_top -= STACK_SIZE as usize; - let stack_bottom = self.user_thread_stack_top; - - // create guard page - self.user_thread_stack_top -= Size4KiB::SIZE as usize; + let stack_bottom = self + .vm + .create_entry_high( + STACK_SIZE as usize, + ProtectionFlags::READ | ProtectionFlags::WRITE, + MapFlags::ANONYMOUS, + ) + .as_u64() as usize; + let stack_top = STACK_SIZE as usize + stack_bottom; for addr in (stack_bottom..stack_top).step_by(Size4KiB::SIZE as usize) { let 
             let phys_page = FRAME_ALLOCATOR.lock().allocate_frame::<Size4KiB>().unwrap();
@@ -118,13 +111,6 @@ impl Process {
             );
         }
 
-        self.vm.add_entry(
-            VirtAddr::new(stack_bottom as u64),
-            STACK_SIZE as usize,
-            ProtectionFlags::READ | ProtectionFlags::WRITE,
-            MapFlags::ANONYMOUS,
-        );
-
         stack_top
     }
 
diff --git a/xernel/kernel/src/sched/thread.rs b/xernel/kernel/src/sched/thread.rs
index d2cd373d..abd3d08f 100644
--- a/xernel/kernel/src/sched/thread.rs
+++ b/xernel/kernel/src/sched/thread.rs
@@ -1,5 +1,10 @@
 use alloc::sync::Arc;
 use libxernel::sync::Spinlock;
+use x86_64::structures::paging::{Page, PageSize, PhysFrame, Size4KiB};
+
+use crate::mem::frame::FRAME_ALLOCATOR;
+use crate::mem::paging::KERNEL_PAGE_MAPPER;
+use crate::mem::STACK_SIZE;
 
 use super::context::CpuContext;
 use super::process::{Process, KERNEL_PROCESS};
@@ -167,3 +172,22 @@ impl Thread {
         self.process.upgrade()
     }
 }
+
+impl Drop for Thread {
+    fn drop(&mut self) {
+        if self.is_kernel_thread() {
+            let mut page_mapper = KERNEL_PAGE_MAPPER.lock();
+            let mut frame_allocator = FRAME_ALLOCATOR.lock();
+
+            for addr in (self.thread_stack..self.thread_stack + STACK_SIZE as usize).step_by(Size4KiB::SIZE as usize) {
+                unsafe {
+                    let page = Page::<Size4KiB>::from_start_address(VirtAddr::new(addr as u64)).unwrap();
+                    let phys_addr = page_mapper.translate(page.start_address()).unwrap();
+
+                    frame_allocator.deallocate_frame(PhysFrame::<Size4KiB>::containing_address(phys_addr));
+                    page_mapper.unmap(page.start_address());
+                }
+            }
+        }
+    }
+}
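
Not part of the patch, but for readers who want to poke at the placement policy in isolation: the sketch below models the behaviour of Vm::is_available and Vm::create_entry_at from mem/vm.rs as a plain userspace program. Addresses are bare u64 values, PAGE_SIZE/PROCESS_START/PROCESS_END are stand-in constants, and protection flags, page tables and the create_entry_high stack path are left out, so it only illustrates the guard-page overlap check and the first-fit gap search, not the kernel code itself.

```rust
use std::collections::BTreeMap;

const PAGE_SIZE: u64 = 4096;
const PROCESS_START: u64 = 0x40_0000;
const PROCESS_END: u64 = 0x1_0000_0000;

struct Vm {
    /// start address -> length, kept sorted by start address like the kernel's BTreeMap
    entries: BTreeMap<u64, u64>,
}

impl Vm {
    /// Free means: no existing entry overlaps the range padded by one guard page on each side.
    fn is_available(&self, start: u64, len: u64) -> bool {
        !self
            .entries
            .iter()
            .any(|(&s, &l)| s < start + len + PAGE_SIZE && s + l + PAGE_SIZE > start)
    }

    /// Place at `start` if that range is free, otherwise in the first gap (in address order) that fits.
    fn create_entry_at(&mut self, start: u64, len: u64) -> u64 {
        assert!(start % PAGE_SIZE == 0, "start must be page aligned");
        let start = start.max(PROCESS_START);
        assert!(start + len <= PROCESS_END, "out of bounds");

        if self.is_available(start, len) {
            self.entries.insert(start, len);
            return start;
        }

        // Walk the entries in address order and stop at the first gap that can
        // hold the mapping plus a guard page on both sides.
        let mut prev_end = PROCESS_START;
        for (&s, &l) in &self.entries {
            if s.saturating_sub(prev_end) >= len + 2 * PAGE_SIZE {
                break;
            }
            prev_end = s + l;
        }

        let new_start = (prev_end + PAGE_SIZE).next_multiple_of(PAGE_SIZE);
        assert!(new_start + len <= PROCESS_END, "Vm space exhausted");
        self.entries.insert(new_start, len);
        new_start
    }
}

fn main() {
    let mut vm = Vm { entries: BTreeMap::new() };
    let a = vm.create_entry_at(PROCESS_START, 0x4000);
    let b = vm.create_entry_at(PROCESS_START, 0x2000); // collides with `a`, pushed behind it
    println!("a = {a:#x}, b = {b:#x}"); // a = 0x400000, b = 0x405000
}
```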