From 8e4e67862ead9859c99e8864398493330335d24f Mon Sep 17 00:00:00 2001 From: 0x6D70 Date: Tue, 23 Jan 2024 20:35:13 +0100 Subject: [PATCH 01/12] use treemap for vm --- xernel/kernel/src/mem/mmap.rs | 5 +- xernel/kernel/src/mem/mod.rs | 6 +- xernel/kernel/src/mem/vm.rs | 144 ++++++++++++++++++++++++----- xernel/kernel/src/sched/process.rs | 37 +++----- 4 files changed, 141 insertions(+), 51 deletions(-) diff --git a/xernel/kernel/src/mem/mmap.rs b/xernel/kernel/src/mem/mmap.rs index 02540c6c..ee483dc1 100644 --- a/xernel/kernel/src/mem/mmap.rs +++ b/xernel/kernel/src/mem/mmap.rs @@ -30,8 +30,7 @@ pub fn mmap( match flags { MapFlags::ANONYMOUS => { - let start_address = process.vm().find_next_start_address(); - process.vm().add_entry(start_address, len, prot, flags); + let start_address = process.vm().create_entry_at(addr, len, prot, flags); Ok(start_address.as_u64() as isize) } @@ -60,7 +59,7 @@ pub fn handle_page_fault(addr: VirtAddr, error_code: PageFaultErrorCode) -> bool let base_addr = addr.align_down(Size4KiB::SIZE); let frame = FRAME_ALLOCATOR.lock().allocate_frame::().unwrap(); - let pt_flags = ptflags_from_protflags(vm_entry.prot); + let pt_flags = ptflags_from_protflags(vm_entry.prot, true); // TODO: don't hardcode user_accessible let mut pt = process.get_page_table().unwrap(); pt.map::(frame, Page::from_start_address(base_addr).unwrap(), pt_flags, true); diff --git a/xernel/kernel/src/mem/mod.rs b/xernel/kernel/src/mem/mod.rs index d8dc4ec2..fcfecfaa 100644 --- a/xernel/kernel/src/mem/mod.rs +++ b/xernel/kernel/src/mem/mod.rs @@ -13,12 +13,12 @@ pub static HIGHER_HALF_OFFSET: Once = Once::new(); pub const KERNEL_OFFSET: u64 = 0xffff_ffff_8000_0000; pub const HEAP_START_ADDR: usize = 0xffff_9000_0000_0000; -// NOTE: there are 16 TB available for mmap until it overflows into the non-canonical address space -pub const MMAP_START_ADDR: usize = 0x0000_7000_0000_0000; // NOTE: stack grows down pub const KERNEL_THREAD_STACK_TOP: u64 = 0xffff_a000_0000_0000; -pub 
const USER_THREAD_STACK_TOP: u64 = 0x0000_ffff_ffff_f000; + +pub const PROCESS_END: u64 = 0x0000_ffff_ffff_f000; +pub const PROCESS_START: u64 = 0x0000_0001_0000_0000; pub const STACK_SIZE: u64 = 0x40000; pub const FRAME_SIZE: u64 = 4096; diff --git a/xernel/kernel/src/mem/vm.rs b/xernel/kernel/src/mem/vm.rs index f4691c9e..b148e1e5 100644 --- a/xernel/kernel/src/mem/vm.rs +++ b/xernel/kernel/src/mem/vm.rs @@ -1,17 +1,18 @@ -use alloc::vec::Vec; +use alloc::collections::BTreeMap; use libxernel::syscall::{MapFlags, ProtectionFlags}; -use x86_64::align_up; use x86_64::structures::paging::PageTableFlags; use x86_64::{ structures::paging::{PageSize, Size4KiB}, VirtAddr, }; -use super::MMAP_START_ADDR; +use crate::mem::PROCESS_END; + +use super::{PROCESS_START, STACK_SIZE}; pub struct VmEntry { - start: VirtAddr, - length: usize, + pub start: VirtAddr, + pub length: usize, pub prot: ProtectionFlags, pub flags: MapFlags, // TODO: add something to represent to which file this entry belongs to @@ -25,38 +26,136 @@ impl VmEntry { } pub struct Vm { - entries: Vec, + entries: BTreeMap, } impl Vm { pub const fn new() -> Self { - Self { entries: Vec::new() } + Self { + entries: BTreeMap::new(), + } } - pub fn add_entry(&mut self, start: VirtAddr, length: usize, prot: ProtectionFlags, flags: MapFlags) { - self.entries.push(VmEntry { + fn add_entry(&mut self, start: VirtAddr, length: usize, prot: ProtectionFlags, flags: MapFlags) { + let entry = VmEntry { start, length, prot, flags, file: None, - }); + }; + + self.entries.insert(start, entry); + } + + pub fn is_available(&self, start: VirtAddr, length: usize) -> bool { + self.entries.iter().any(|(_, entry)| { + entry.start < start && entry.end() + Size4KiB::SIZE > start + || start + length + Size4KiB::SIZE > entry.start + && (start + length + Size4KiB::SIZE).as_u64() < Size4KiB::SIZE + }) } - pub fn find_next_start_address(&self) -> VirtAddr { - let last_entry = self.entries.last(); + pub fn create_entry_low(&mut self, length: 
usize, prot: ProtectionFlags, flags: MapFlags) -> VirtAddr { + self.create_entry_at(VirtAddr::new(PROCESS_START), length, prot, flags) + } + + pub fn create_entry_high(&mut self, length: usize, prot: ProtectionFlags, flags: MapFlags) -> VirtAddr { + let mut start_address = VirtAddr::new(PROCESS_END - length as u64); + + loop { + if self.is_available(start_address, length) { + if start_address.as_u64() < PROCESS_START { + panic!( + "create_entry_high: {:x}(length = {}) is out of bounds", + start_address, length + ); + } + + self.add_entry(start_address, length, prot, flags); + return start_address; + } - if let Some(last_entry) = last_entry { - VirtAddr::new(align_up(last_entry.end().as_u64(), Size4KiB::SIZE)) - } else { - VirtAddr::new(MMAP_START_ADDR as u64) + // NOTE: at the moment only a stack should be create at the high end of the process address space + start_address -= STACK_SIZE; + } + } + + /// A new entry is created at the given address or higher + pub fn create_entry_at( + &mut self, + mut start: VirtAddr, + length: usize, + prot: ProtectionFlags, + flags: MapFlags, + ) -> VirtAddr { + if start.as_u64() + length as u64 > PROCESS_END { + panic!("create_entry_at: {:x}(length = {}) is out of bounds", start, length); + } + + if !start.is_aligned(Size4KiB::SIZE) { + panic!("create_entry_at: {:x} is not aligned", start); + } + + if start.as_u64() < PROCESS_START { + start = VirtAddr::new(PROCESS_START); + } + + if self.is_available(start, length) { + self.add_entry(start, length, prot, flags); + return start; + } + + let mut values_iter = self.entries.values(); + let mut previous = values_iter.next().unwrap(); + let current = values_iter.next(); + + if current.is_none() { + let new_start = previous.end() + Size4KiB::SIZE; + let new_start = new_start.align_up(Size4KiB::SIZE); + + self.add_entry(new_start, length, prot, flags); + return new_start; + } + + let mut current = current.unwrap(); + + loop { + if current.start - previous.end() >= length as u64 + 2 * 
Size4KiB::SIZE { + let new_start = previous.end() + Size4KiB::SIZE; + let new_start = new_start.align_up(Size4KiB::SIZE); + + self.add_entry(new_start, length, prot, flags); + return new_start; + } + + previous = current; + let current_opt = values_iter.next(); + + if current_opt.is_none() { + let new_start = previous.end() + Size4KiB::SIZE; + let new_start = new_start.align_up(Size4KiB::SIZE); + + if new_start.as_u64() + length as u64 > PROCESS_END { + panic!( + "create_entry_at: {:x}(length = {}) is out of bounds! Vm space is exhausted", + new_start, length + ); + } + + self.add_entry(new_start, length, prot, flags); + return new_start; + } + + current = current_opt.unwrap(); } } pub fn get_entry_from_address(&self, addr: VirtAddr) -> Option<&VmEntry> { self.entries .iter() - .find(|entry| entry.start <= addr && entry.end() > addr) + .find(|(_, entry)| entry.start <= addr && entry.end() > addr) + .map(|(_, entry)| entry) } pub fn clean_up(&mut self) { @@ -65,12 +164,15 @@ impl Vm { } } -pub fn ptflags_from_protflags(flags: ProtectionFlags) -> PageTableFlags { +pub fn ptflags_from_protflags(flags: ProtectionFlags, user_accessible: bool) -> PageTableFlags { let mut new_flags = PageTableFlags::PRESENT; - if flags.contains(ProtectionFlags::READ) { - // TODO: how to handle this?? 
- todo!("PageTableFlags::READ") + if user_accessible { + new_flags |= PageTableFlags::USER_ACCESSIBLE; + } + + if !flags.contains(ProtectionFlags::READ) { + // NOTE: it is not possible to remove read access from a page } if flags.contains(ProtectionFlags::WRITE) { diff --git a/xernel/kernel/src/sched/process.rs b/xernel/kernel/src/sched/process.rs index 5e16880d..36401ffe 100644 --- a/xernel/kernel/src/sched/process.rs +++ b/xernel/kernel/src/sched/process.rs @@ -1,5 +1,6 @@ use alloc::sync::Weak; use core::sync::atomic::{AtomicUsize, Ordering}; +use libxernel::syscall::{MapFlags, ProtectionFlags}; use x86_64::structures::paging::{Page, PageSize, PageTableFlags, Size4KiB}; use x86_64::VirtAddr; @@ -7,7 +8,7 @@ use crate::fs::file::File; use crate::fs::vnode::VNode; use crate::mem::frame::FRAME_ALLOCATOR; use crate::mem::vm::Vm; -use crate::mem::{KERNEL_THREAD_STACK_TOP, STACK_SIZE, USER_THREAD_STACK_TOP}; +use crate::mem::{KERNEL_THREAD_STACK_TOP, STACK_SIZE}; use crate::VFS; use alloc::collections::BTreeMap; use alloc::sync::Arc; @@ -18,8 +19,6 @@ use libxernel::sync::{Once, Spinlock}; use crate::mem::paging::{Pagemap, KERNEL_PAGE_MAPPER}; use crate::sched::thread::Thread; -use libxernel::syscall::{MapFlags, ProtectionFlags}; - /// Ongoing counter for the ProcessID static PROCESS_ID_COUNTER: AtomicUsize = AtomicUsize::new(0); @@ -34,7 +33,6 @@ pub struct Process { pub threads: Vec>>, pub fds: BTreeMap, pub kernel_thread_stack_top: usize, - pub user_thread_stack_top: usize, pub thread_id_counter: usize, pub vm: Vm, pub cwd: Arc>, @@ -58,7 +56,6 @@ impl Process { threads: Vec::new(), fds: BTreeMap::new(), kernel_thread_stack_top: KERNEL_THREAD_STACK_TOP as usize, - user_thread_stack_top: USER_THREAD_STACK_TOP as usize, thread_id_counter: 0, vm: Vm::new(), cwd: VFS.lock().root_node(), @@ -85,23 +82,22 @@ impl Process { ); } - self.vm.add_entry( - VirtAddr::new(stack_bottom as u64), - STACK_SIZE as usize, - ProtectionFlags::READ | ProtectionFlags::WRITE, - 
MapFlags::ANONYMOUS, - ); + // TODO: how to unmap this page later?? + // We can't add it to Vm because it's not in the lower half of the address space stack_top } pub fn new_user_stack(&mut self) -> usize { - let stack_top = self.user_thread_stack_top; - self.user_thread_stack_top -= STACK_SIZE as usize; - let stack_bottom = self.user_thread_stack_top; - - // create guard page - self.user_thread_stack_top -= Size4KiB::SIZE as usize; + let stack_bottom = self + .vm + .create_entry_high( + STACK_SIZE as usize, + ProtectionFlags::READ | ProtectionFlags::WRITE, + MapFlags::ANONYMOUS, + ) + .as_u64() as usize; + let stack_top = STACK_SIZE as usize + stack_bottom; for addr in (stack_bottom..stack_top).step_by(Size4KiB::SIZE as usize) { let phys_page = FRAME_ALLOCATOR.lock().allocate_frame::().unwrap(); @@ -118,13 +114,6 @@ impl Process { ); } - self.vm.add_entry( - VirtAddr::new(stack_bottom as u64), - STACK_SIZE as usize, - ProtectionFlags::READ | ProtectionFlags::WRITE, - MapFlags::ANONYMOUS, - ); - stack_top } From a6ecd6a011ceb7bcbe1565a8ab378f713cac02b3 Mon Sep 17 00:00:00 2001 From: 0x6D70 Date: Tue, 23 Jan 2024 20:36:44 +0100 Subject: [PATCH 02/12] improve order of mem consts --- xernel/kernel/src/mem/mod.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/xernel/kernel/src/mem/mod.rs b/xernel/kernel/src/mem/mod.rs index fcfecfaa..3a5343c6 100644 --- a/xernel/kernel/src/mem/mod.rs +++ b/xernel/kernel/src/mem/mod.rs @@ -17,8 +17,8 @@ pub const HEAP_START_ADDR: usize = 0xffff_9000_0000_0000; // NOTE: stack grows down pub const KERNEL_THREAD_STACK_TOP: u64 = 0xffff_a000_0000_0000; -pub const PROCESS_END: u64 = 0x0000_ffff_ffff_f000; pub const PROCESS_START: u64 = 0x0000_0001_0000_0000; +pub const PROCESS_END: u64 = 0x0000_ffff_ffff_f000; pub const STACK_SIZE: u64 = 0x40000; pub const FRAME_SIZE: u64 = 4096; From 8185a24b9cfbc890b45bc6b4f53a9cf21931b1df Mon Sep 17 00:00:00 2001 From: 0x6D70 Date: Tue, 23 Jan 2024 20:47:50 +0100 Subject: [PATCH 03/12] 
fix available check for vm --- xernel/kernel/src/mem/vm.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/xernel/kernel/src/mem/vm.rs b/xernel/kernel/src/mem/vm.rs index b148e1e5..2652d476 100644 --- a/xernel/kernel/src/mem/vm.rs +++ b/xernel/kernel/src/mem/vm.rs @@ -49,7 +49,7 @@ impl Vm { } pub fn is_available(&self, start: VirtAddr, length: usize) -> bool { - self.entries.iter().any(|(_, entry)| { + !self.entries.iter().any(|(_, entry)| { entry.start < start && entry.end() + Size4KiB::SIZE > start || start + length + Size4KiB::SIZE > entry.start && (start + length + Size4KiB::SIZE).as_u64() < Size4KiB::SIZE @@ -64,7 +64,7 @@ impl Vm { let mut start_address = VirtAddr::new(PROCESS_END - length as u64); loop { - if self.is_available(start_address, length) { + if self.is_available(start_address, length) { if start_address.as_u64() < PROCESS_START { panic!( "create_entry_high: {:x}(length = {}) is out of bounds", From bd2a0c6f33938017fc4d4b95e7dda8633b11bad7 Mon Sep 17 00:00:00 2001 From: 0x6D70 Date: Tue, 23 Jan 2024 20:50:00 +0100 Subject: [PATCH 04/12] fix typo --- xernel/kernel/src/sched/process.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/xernel/kernel/src/sched/process.rs b/xernel/kernel/src/sched/process.rs index 36401ffe..c6eeb905 100644 --- a/xernel/kernel/src/sched/process.rs +++ b/xernel/kernel/src/sched/process.rs @@ -82,7 +82,7 @@ impl Process { ); } - // TODO: how to unmap this page later?? + // TODO: how to unmap this stack later?? 
// We can't add it to Vm because it's not in the lower half of the address space stack_top From c284012b7941e0852eaa123dd465c9f9c212006f Mon Sep 17 00:00:00 2001 From: 0x6D70 Date: Tue, 23 Jan 2024 21:02:47 +0100 Subject: [PATCH 05/12] improve process start address --- xernel/kernel/src/mem/mod.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/xernel/kernel/src/mem/mod.rs b/xernel/kernel/src/mem/mod.rs index 3a5343c6..b29f1abc 100644 --- a/xernel/kernel/src/mem/mod.rs +++ b/xernel/kernel/src/mem/mod.rs @@ -17,7 +17,7 @@ pub const HEAP_START_ADDR: usize = 0xffff_9000_0000_0000; // NOTE: stack grows down pub const KERNEL_THREAD_STACK_TOP: u64 = 0xffff_a000_0000_0000; -pub const PROCESS_START: u64 = 0x0000_0001_0000_0000; +pub const PROCESS_START: u64 = 0x0000_0000_0040_0000; pub const PROCESS_END: u64 = 0x0000_ffff_ffff_f000; pub const STACK_SIZE: u64 = 0x40000; From 738d881d5d17ca7fc406edb897c9f2cb581d6bb8 Mon Sep 17 00:00:00 2001 From: 0x6D70 Date: Tue, 23 Jan 2024 21:10:45 +0100 Subject: [PATCH 06/12] properly check if a process is a kernel process --- xernel/kernel/src/mem/mmap.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/xernel/kernel/src/mem/mmap.rs b/xernel/kernel/src/mem/mmap.rs index ee483dc1..fb72f308 100644 --- a/xernel/kernel/src/mem/mmap.rs +++ b/xernel/kernel/src/mem/mmap.rs @@ -59,7 +59,7 @@ pub fn handle_page_fault(addr: VirtAddr, error_code: PageFaultErrorCode) -> bool let base_addr = addr.align_down(Size4KiB::SIZE); let frame = FRAME_ALLOCATOR.lock().allocate_frame::().unwrap(); - let pt_flags = ptflags_from_protflags(vm_entry.prot, true); // TODO: don't hardcode user_accessible + let pt_flags = ptflags_from_protflags(vm_entry.prot, process.page_table.is_some()); let mut pt = process.get_page_table().unwrap(); pt.map::(frame, Page::from_start_address(base_addr).unwrap(), pt_flags, true); From c877ab94d198fd56b5917d462f50f8fa31d0166c Mon Sep 17 00:00:00 2001 From: 0x6D70 Date: Tue, 23 Jan 2024 21:20:13 +0100
Subject: [PATCH 07/12] implement deallocation of mappings --- xernel/kernel/src/mem/paging.rs | 31 +++++++++++++++++++++++++++++++ xernel/kernel/src/mem/vm.rs | 29 +++++++++++++++++++++++++---- 2 files changed, 56 insertions(+), 4 deletions(-) diff --git a/xernel/kernel/src/mem/paging.rs b/xernel/kernel/src/mem/paging.rs index d1c11266..d69d8163 100644 --- a/xernel/kernel/src/mem/paging.rs +++ b/xernel/kernel/src/mem/paging.rs @@ -293,6 +293,37 @@ impl Pagemap { (*pml1)[virt.p1_index()].set_unused(); } } + + /// Only works with 4KiB pages + pub fn translate(&self, virt: VirtAddr) -> Option { + let pml4 = self.page_table; + + unsafe { + if !(*pml4)[virt.p4_index()].flags().contains(PageTableFlags::PRESENT) { + return None; + } + + let pml3 = (*pml4)[virt.p4_index()].addr().as_u64() as *mut PageTable; + + if !(*pml3)[virt.p3_index()].flags().contains(PageTableFlags::PRESENT) { + return None; + } + + let pml2 = (*pml3)[virt.p3_index()].addr().as_u64() as *mut PageTable; + + if !(*pml2)[virt.p2_index()].flags().contains(PageTableFlags::PRESENT) { + return None; + } + + let pml1 = (*pml2)[virt.p2_index()].addr().as_u64() as *mut PageTable; + + if !(*pml1)[virt.p1_index()].flags().contains(PageTableFlags::PRESENT) { + return None; + } + + Some((*pml1)[virt.p1_index()].addr() + u64::from(virt.page_offset())) + } + } } impl Drop for Pagemap { diff --git a/xernel/kernel/src/mem/vm.rs b/xernel/kernel/src/mem/vm.rs index 2652d476..a036a977 100644 --- a/xernel/kernel/src/mem/vm.rs +++ b/xernel/kernel/src/mem/vm.rs @@ -1,13 +1,15 @@ use alloc::collections::BTreeMap; use libxernel::syscall::{MapFlags, ProtectionFlags}; -use x86_64::structures::paging::PageTableFlags; +use x86_64::structures::paging::{PageTableFlags, PhysFrame}; use x86_64::{ structures::paging::{PageSize, Size4KiB}, VirtAddr, }; use crate::mem::PROCESS_END; +use crate::sched::scheduler::Scheduler; +use super::frame::FRAME_ALLOCATOR; use super::{PROCESS_START, STACK_SIZE}; pub struct VmEntry { @@ -23,6 +25,25 @@ 
impl VmEntry { pub fn end(&self) -> VirtAddr { self.start + self.length } + + pub fn unmap(&self) { + let process = Scheduler::current_process(); + let process = process.lock(); + + // SAFETY: only userspace processes should have Vm mappings + let mut page_mapper = process.get_page_table().unwrap(); + let mut frame_allocator = FRAME_ALLOCATOR.lock(); + + for page in (self.start..self.end()).step_by(Size4KiB::SIZE as usize) { + if let Some(phys_addr) = page_mapper.translate(page) { + unsafe { + frame_allocator.deallocate_frame(PhysFrame::::containing_address(phys_addr)); + } + } + + page_mapper.unmap(page); + } + } } pub struct Vm { @@ -64,7 +85,7 @@ impl Vm { let mut start_address = VirtAddr::new(PROCESS_END - length as u64); loop { - if self.is_available(start_address, length) { + if self.is_available(start_address, length) { if start_address.as_u64() < PROCESS_START { panic!( "create_entry_high: {:x}(length = {}) is out of bounds", @@ -159,8 +180,8 @@ impl Vm { } pub fn clean_up(&mut self) { - todo!("clean up all mappings and free memory") - // NOTE: don't forget to remove the entries from the vector + self.entries.values().for_each(|value| value.unmap()); + self.entries.clear(); } } From 72e6a03e384e5f5445336cf6a7e7be37f2c29d66 Mon Sep 17 00:00:00 2001 From: 0x6D70 Date: Tue, 23 Jan 2024 21:31:21 +0100 Subject: [PATCH 08/12] implement deallocation of page table --- xernel/kernel/src/mem/paging.rs | 39 ++++++++++++++++++++++++++++++++- 1 file changed, 38 insertions(+), 1 deletion(-) diff --git a/xernel/kernel/src/mem/paging.rs b/xernel/kernel/src/mem/paging.rs index d69d8163..776132ea 100644 --- a/xernel/kernel/src/mem/paging.rs +++ b/xernel/kernel/src/mem/paging.rs @@ -324,11 +324,48 @@ impl Pagemap { Some((*pml1)[virt.p1_index()].addr() + u64::from(virt.page_offset())) } } + + fn deallocate_pt(pt: *mut PageTable, level: u8) { + let mut frame_allocator = FRAME_ALLOCATOR.lock(); + if level == 1 { + for i in 0..128 { + unsafe { + if 
(*pt)[i].flags().contains(PageTableFlags::PRESENT) { + let pt = ((*pt)[i].addr().as_u64() + *HIGHER_HALF_OFFSET) as *mut PageTable; + + Self::deallocate_pt(pt, level + 1); + } + } + } + + unsafe { + frame_allocator.deallocate_frame( + PhysFrame::::from_start_address(PhysAddr::new(pt as u64 - *HIGHER_HALF_OFFSET)).unwrap(), + ); + } + } else if level <= 3 { + for i in 0..256 { + unsafe { + if (*pt)[i].flags().contains(PageTableFlags::PRESENT) { + let pt = ((*pt)[i].addr().as_u64() + *HIGHER_HALF_OFFSET) as *mut PageTable; + + Self::deallocate_pt(pt, level + 1); + } + } + } + + unsafe { + frame_allocator.deallocate_frame( + PhysFrame::::from_start_address(PhysAddr::new(pt as u64 - *HIGHER_HALF_OFFSET)).unwrap(), + ); + } + } + } } impl Drop for Pagemap { fn drop(&mut self) { - todo!("drop pagemap") + Self::deallocate_pt(self.page_table, 1); } } From 0770c2ad5bac594d2389dfe0b4fe0469488b2b7a Mon Sep 17 00:00:00 2001 From: 0x6D70 Date: Tue, 23 Jan 2024 21:40:17 +0100 Subject: [PATCH 09/12] deallocate kernel stack --- xernel/kernel/src/sched/process.rs | 3 --- xernel/kernel/src/sched/thread.rs | 24 ++++++++++++++++++++++++ 2 files changed, 24 insertions(+), 3 deletions(-) diff --git a/xernel/kernel/src/sched/process.rs b/xernel/kernel/src/sched/process.rs index c6eeb905..8b25c3fe 100644 --- a/xernel/kernel/src/sched/process.rs +++ b/xernel/kernel/src/sched/process.rs @@ -82,9 +82,6 @@ impl Process { ); } - // TODO: how to unmap this stack later?? 
- // We can't add it to Vm because it's not in the lower half of the address space - stack_top } diff --git a/xernel/kernel/src/sched/thread.rs b/xernel/kernel/src/sched/thread.rs index d2cd373d..abd3d08f 100644 --- a/xernel/kernel/src/sched/thread.rs +++ b/xernel/kernel/src/sched/thread.rs @@ -1,5 +1,10 @@ use alloc::sync::Arc; use libxernel::sync::Spinlock; +use x86_64::structures::paging::{Page, PageSize, PhysFrame, Size4KiB}; + +use crate::mem::frame::FRAME_ALLOCATOR; +use crate::mem::paging::KERNEL_PAGE_MAPPER; +use crate::mem::STACK_SIZE; use super::context::CpuContext; use super::process::{Process, KERNEL_PROCESS}; @@ -167,3 +172,22 @@ impl Thread { self.process.upgrade() } } + +impl Drop for Thread { + fn drop(&mut self) { + if self.is_kernel_thread() { + let mut page_mapper = KERNEL_PAGE_MAPPER.lock(); + let mut frame_allocator = FRAME_ALLOCATOR.lock(); + + for addr in (self.thread_stack..self.thread_stack + STACK_SIZE as usize).step_by(Size4KiB::SIZE as usize) { + unsafe { + let page = Page::::from_start_address(VirtAddr::new(addr as u64)).unwrap(); + let phys_addr = page_mapper.translate(page.start_address()).unwrap(); + + frame_allocator.deallocate_frame(PhysFrame::::containing_address(phys_addr)); + page_mapper.unmap(page.start_address()); + } + } + } + } +} From fc6b265e5b5eda7ba08be68779381c3bedc34242 Mon Sep 17 00:00:00 2001 From: 0x6D70 Date: Tue, 23 Jan 2024 21:57:37 +0100 Subject: [PATCH 10/12] fix deadlock --- xernel/kernel/src/main.rs | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/xernel/kernel/src/main.rs b/xernel/kernel/src/main.rs index 7aacd695..33095f27 100644 --- a/xernel/kernel/src/main.rs +++ b/xernel/kernel/src/main.rs @@ -170,7 +170,8 @@ extern "C" fn kernel_main() -> ! 
{ PageTableFlags::WRITABLE | PageTableFlags::USER_ACCESSIBLE | PageTableFlags::PRESENT, true, ); - process.lock().get_page_table().unwrap().map( + let mut pm = process.lock().get_page_table().unwrap(); + pm.map( page, Page::from_start_address(VirtAddr::new(0x200000)).unwrap(), PageTableFlags::WRITABLE | PageTableFlags::USER_ACCESSIBLE | PageTableFlags::PRESENT, From 23c5c6b3ae4425a91d7afa981d7bb1d15bd39e9a Mon Sep 17 00:00:00 2001 From: 0x6D70 Date: Thu, 25 Jan 2024 10:24:54 +0100 Subject: [PATCH 11/12] create helper function to get next page table --- xernel/kernel/src/mem/paging.rs | 12 ++++++++---- 1 file changed, 8 insertions(+), 4 deletions(-) diff --git a/xernel/kernel/src/mem/paging.rs b/xernel/kernel/src/mem/paging.rs index 776132ea..8826d6ce 100644 --- a/xernel/kernel/src/mem/paging.rs +++ b/xernel/kernel/src/mem/paging.rs @@ -9,7 +9,7 @@ use limine::KernelAddressRequest; use x86_64::{ align_down, registers::control::{Cr3, Cr3Flags}, - structures::paging::{Page, PageSize, Size1GiB, Size2MiB, Size4KiB}, + structures::paging::{Page, PageSize, PageTableIndex, Size1GiB, Size2MiB, Size4KiB}, }; use x86_64::{ structures::paging::{PageTable, PageTableFlags, PhysFrame}, @@ -294,6 +294,10 @@ impl Pagemap { } } + unsafe fn get_pt(pt: *mut PageTable, pt_index: PageTableIndex) -> *mut PageTable { + (*pt)[pt_index].addr().as_u64() as *mut PageTable + } + /// Only works with 4KiB pages pub fn translate(&self, virt: VirtAddr) -> Option { let pml4 = self.page_table; @@ -303,19 +307,19 @@ impl Pagemap { return None; } - let pml3 = (*pml4)[virt.p4_index()].addr().as_u64() as *mut PageTable; + let pml3 = Self::get_pt(pml4, virt.p4_index()); if !(*pml3)[virt.p3_index()].flags().contains(PageTableFlags::PRESENT) { return None; } - let pml2 = (*pml3)[virt.p3_index()].addr().as_u64() as *mut PageTable; + let pml2 = Self::get_pt(pml3, virt.p3_index()); if !(*pml2)[virt.p2_index()].flags().contains(PageTableFlags::PRESENT) { return None; } - let pml1 = 
(*pml2)[virt.p2_index()].addr().as_u64() as *mut PageTable; + let pml1 = Self::get_pt(pml2, virt.p2_index()); if !(*pml1)[virt.p1_index()].flags().contains(PageTableFlags::PRESENT) { return None; From 38e5e78c31f9bc8c122ad48f010f24be5ed7f1ee Mon Sep 17 00:00:00 2001 From: 0x6D70 Date: Thu, 25 Jan 2024 10:45:12 +0100 Subject: [PATCH 12/12] start with level 4 when deallocating page table --- xernel/kernel/src/mem/paging.rs | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/xernel/kernel/src/mem/paging.rs b/xernel/kernel/src/mem/paging.rs index 8826d6ce..d40627d4 100644 --- a/xernel/kernel/src/mem/paging.rs +++ b/xernel/kernel/src/mem/paging.rs @@ -331,13 +331,13 @@ impl Pagemap { fn deallocate_pt(pt: *mut PageTable, level: u8) { let mut frame_allocator = FRAME_ALLOCATOR.lock(); - if level == 1 { + if level == 4 { for i in 0..128 { unsafe { if (*pt)[i].flags().contains(PageTableFlags::PRESENT) { let pt = ((*pt)[i].addr().as_u64() + *HIGHER_HALF_OFFSET) as *mut PageTable; - Self::deallocate_pt(pt, level + 1); + Self::deallocate_pt(pt, level - 1); } } } @@ -347,13 +347,13 @@ impl Pagemap { PhysFrame::::from_start_address(PhysAddr::new(pt as u64 - *HIGHER_HALF_OFFSET)).unwrap(), ); } - } else if level <= 3 { + } else if level > 1 { for i in 0..256 { unsafe { if (*pt)[i].flags().contains(PageTableFlags::PRESENT) { let pt = ((*pt)[i].addr().as_u64() + *HIGHER_HALF_OFFSET) as *mut PageTable; - Self::deallocate_pt(pt, level + 1); + Self::deallocate_pt(pt, level - 1); } } } @@ -369,7 +369,7 @@ impl Pagemap { impl Drop for Pagemap { fn drop(&mut self) { - Self::deallocate_pt(self.page_table, 1); + Self::deallocate_pt(self.page_table, 4); } }