
Merge pull request #7 from anubis-rs/rework-vm
Rework Vm
0x6D70 authored Jan 25, 2024
2 parents 89b77b6 + 38e5e78 commit 430dc39
Showing 7 changed files with 263 additions and 58 deletions.
3 changes: 2 additions & 1 deletion xernel/kernel/src/main.rs
@@ -170,7 +170,8 @@ extern "C" fn kernel_main() -> ! {
         PageTableFlags::WRITABLE | PageTableFlags::USER_ACCESSIBLE | PageTableFlags::PRESENT,
         true,
     );
-    process.lock().get_page_table().unwrap().map(
+    let mut pm = process.lock().get_page_table().unwrap();
+    pm.map(
         page,
         Page::from_start_address(VirtAddr::new(0x200000)).unwrap(),
         PageTableFlags::WRITABLE | PageTableFlags::USER_ACCESSIBLE | PageTableFlags::PRESENT,
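The main.rs hunk replaces a call chained onto a temporary with a named `pm` binding, so the page table obtained from the locked process can be reused without re-locking for every mapping. A minimal sketch of that pattern; the loop and the frame allocation are hypothetical, borrowed from the mmap.rs hunk below:

    // One binding instead of chaining each map() onto a fresh
    // process.lock().get_page_table() temporary.
    let mut pm = process.lock().get_page_table().unwrap();
    for i in 0..4u64 {
        // hypothetical: back and map four consecutive pages
        let frame = FRAME_ALLOCATOR.lock().allocate_frame::<Size4KiB>().unwrap();
        pm.map(
            frame,
            Page::from_start_address(VirtAddr::new(0x200000 + i * 0x1000)).unwrap(),
            PageTableFlags::WRITABLE | PageTableFlags::USER_ACCESSIBLE | PageTableFlags::PRESENT,
            true,
        );
    }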
5 changes: 2 additions & 3 deletions xernel/kernel/src/mem/mmap.rs
@@ -30,8 +30,7 @@ pub fn mmap(

     match flags {
         MapFlags::ANONYMOUS => {
-            let start_address = process.vm().find_next_start_address();
-            process.vm().add_entry(start_address, len, prot, flags);
+            let start_address = process.vm().create_entry_at(addr, len, prot, flags);
 
             Ok(start_address.as_u64() as isize)
         }
@@ -60,7 +59,7 @@ pub fn handle_page_fault(addr: VirtAddr, error_code: PageFaultErrorCode) -> bool
     let base_addr = addr.align_down(Size4KiB::SIZE);
     let frame = FRAME_ALLOCATOR.lock().allocate_frame::<Size4KiB>().unwrap();
 
-    let pt_flags = ptflags_from_protflags(vm_entry.prot);
+    let pt_flags = ptflags_from_protflags(vm_entry.prot, process.page_table.is_some());
     let mut pt = process.get_page_table().unwrap();
 
     pt.map::<Size4KiB>(frame, Page::from_start_address(base_addr).unwrap(), pt_flags, true);
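Together the two mmap.rs hunks make anonymous mappings demand-paged end to end: the syscall threads the caller's address hint into the Vm instead of always appending at the end, and the fault handler derives USER_ACCESSIBLE from whether the process owns its own page table (presumably a userspace process). A condensed sketch of the flow, reusing identifiers from this diff; `fault_addr` and the surrounding plumbing are assumed:

    // mmap(MapFlags::ANONYMOUS): only record the region; no frames yet.
    let start_address = process.vm().create_entry_at(addr, len, prot, flags);

    // First touch of any page in the region faults; handle_page_fault then
    // backs exactly that page with a fresh frame.
    let base_addr = fault_addr.align_down(Size4KiB::SIZE);
    let frame = FRAME_ALLOCATOR.lock().allocate_frame::<Size4KiB>().unwrap();
    let pt_flags = ptflags_from_protflags(vm_entry.prot, process.page_table.is_some());
    let mut pt = process.get_page_table().unwrap();
    pt.map::<Size4KiB>(frame, Page::from_start_address(base_addr).unwrap(), pt_flags, true);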
6 changes: 3 additions & 3 deletions xernel/kernel/src/mem/mod.rs
@@ -13,12 +13,12 @@ pub static HIGHER_HALF_OFFSET: Once<u64> = Once::new();

 pub const KERNEL_OFFSET: u64 = 0xffff_ffff_8000_0000;
 pub const HEAP_START_ADDR: usize = 0xffff_9000_0000_0000;
-// NOTE: there are 16 TB available for mmap until it overflows into the non-canonical address space
-pub const MMAP_START_ADDR: usize = 0x0000_7000_0000_0000;
-
 // NOTE: stack grows down
 pub const KERNEL_THREAD_STACK_TOP: u64 = 0xffff_a000_0000_0000;
 pub const USER_THREAD_STACK_TOP: u64 = 0x0000_ffff_ffff_f000;
 
+pub const PROCESS_START: u64 = 0x0000_0000_0040_0000;
+pub const PROCESS_END: u64 = 0x0000_ffff_ffff_f000;
+
 pub const STACK_SIZE: u64 = 0x40000;
 pub const FRAME_SIZE: u64 = 4096;
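With MMAP_START_ADDR gone (and with it the 16 TB headroom note), userspace regions now live between PROCESS_START and PROCESS_END and are placed by the Vm at runtime. A small compile-time sanity sketch of what the remaining constants mean; the assertions are my arithmetic, not part of the commit, and assume the constants are in scope:

    const _: () = {
        // PROCESS_START = 0x0000_0000_0040_0000 -> user mappings begin at 4 MiB
        assert!(PROCESS_START == 4 * 1024 * 1024);
        // STACK_SIZE = 0x40000 -> each stack reservation is 256 KiB
        assert!(STACK_SIZE == 256 * 1024);
        // FRAME_SIZE matches a 4 KiB page
        assert!(FRAME_SIZE == 4096);
    };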
76 changes: 74 additions & 2 deletions xernel/kernel/src/mem/paging.rs
@@ -9,7 +9,7 @@ use limine::KernelAddressRequest;
 use x86_64::{
     align_down,
     registers::control::{Cr3, Cr3Flags},
-    structures::paging::{Page, PageSize, Size1GiB, Size2MiB, Size4KiB},
+    structures::paging::{Page, PageSize, PageTableIndex, Size1GiB, Size2MiB, Size4KiB},
 };
 use x86_64::{
     structures::paging::{PageTable, PageTableFlags, PhysFrame},
@@ -293,11 +293,83 @@ impl Pagemap {
             (*pml1)[virt.p1_index()].set_unused();
         }
     }
+
+    unsafe fn get_pt(pt: *mut PageTable, pt_index: PageTableIndex) -> *mut PageTable {
+        (*pt)[pt_index].addr().as_u64() as *mut PageTable
+    }
+
+    /// Only works with 4KiB pages
+    pub fn translate(&self, virt: VirtAddr) -> Option<PhysAddr> {
+        let pml4 = self.page_table;
+
+        unsafe {
+            if !(*pml4)[virt.p4_index()].flags().contains(PageTableFlags::PRESENT) {
+                return None;
+            }
+
+            let pml3 = Self::get_pt(pml4, virt.p4_index());
+
+            if !(*pml3)[virt.p3_index()].flags().contains(PageTableFlags::PRESENT) {
+                return None;
+            }
+
+            let pml2 = Self::get_pt(pml3, virt.p3_index());
+
+            if !(*pml2)[virt.p2_index()].flags().contains(PageTableFlags::PRESENT) {
+                return None;
+            }
+
+            let pml1 = Self::get_pt(pml2, virt.p2_index());
+
+            if !(*pml1)[virt.p1_index()].flags().contains(PageTableFlags::PRESENT) {
+                return None;
+            }
+
+            Some((*pml1)[virt.p1_index()].addr() + u64::from(virt.page_offset()))
+        }
+    }
+
+    fn deallocate_pt(pt: *mut PageTable, level: u8) {
+        let mut frame_allocator = FRAME_ALLOCATOR.lock();
+        if level == 4 {
+            for i in 0..128 {
+                unsafe {
+                    if (*pt)[i].flags().contains(PageTableFlags::PRESENT) {
+                        let pt = ((*pt)[i].addr().as_u64() + *HIGHER_HALF_OFFSET) as *mut PageTable;
+
+                        Self::deallocate_pt(pt, level - 1);
+                    }
+                }
+            }
+
+            unsafe {
+                frame_allocator.deallocate_frame(
+                    PhysFrame::<Size4KiB>::from_start_address(PhysAddr::new(pt as u64 - *HIGHER_HALF_OFFSET)).unwrap(),
+                );
+            }
+        } else if level > 1 {
+            for i in 0..256 {
+                unsafe {
+                    if (*pt)[i].flags().contains(PageTableFlags::PRESENT) {
+                        let pt = ((*pt)[i].addr().as_u64() + *HIGHER_HALF_OFFSET) as *mut PageTable;
+
+                        Self::deallocate_pt(pt, level - 1);
+                    }
+                }
+            }
+
+            unsafe {
+                frame_allocator.deallocate_frame(
+                    PhysFrame::<Size4KiB>::from_start_address(PhysAddr::new(pt as u64 - *HIGHER_HALF_OFFSET)).unwrap(),
+                );
+            }
+        }
+    }
 }
 
 impl Drop for Pagemap {
     fn drop(&mut self) {
-        todo!("drop pagemap")
+        Self::deallocate_pt(self.page_table, 4);
     }
 }

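Pagemap::translate walks PML4 through PML1 by hand and returns None as soon as a level lacks the PRESENT flag. A hedged usage sketch, assuming a pagemap obtained the same way as elsewhere in this PR:

    let pagemap = process.get_page_table().unwrap();
    // Some(phys): the frame address plus the page offset of the query.
    // None: unmapped, or mapped through a 2 MiB / 1 GiB page, which this
    // 4 KiB-only walk does not follow.
    let mapped = pagemap.translate(VirtAddr::new(0x20_0000)).is_some();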
171 changes: 147 additions & 24 deletions xernel/kernel/src/mem/vm.rs
@@ -1,17 +1,20 @@
-use alloc::vec::Vec;
+use alloc::collections::BTreeMap;
 use libxernel::syscall::{MapFlags, ProtectionFlags};
-use x86_64::align_up;
-use x86_64::structures::paging::PageTableFlags;
+use x86_64::structures::paging::{PageTableFlags, PhysFrame};
 use x86_64::{
     structures::paging::{PageSize, Size4KiB},
     VirtAddr,
 };
 
-use super::MMAP_START_ADDR;
+use crate::mem::PROCESS_END;
+use crate::sched::scheduler::Scheduler;
+
+use super::frame::FRAME_ALLOCATOR;
+use super::{PROCESS_START, STACK_SIZE};
 
 pub struct VmEntry {
-    start: VirtAddr,
-    length: usize,
+    pub start: VirtAddr,
+    pub length: usize,
     pub prot: ProtectionFlags,
     pub flags: MapFlags,
     // TODO: add something to represent to which file this entry belongs to
@@ -22,55 +25,175 @@ impl VmEntry {
     pub fn end(&self) -> VirtAddr {
         self.start + self.length
     }
+
+    pub fn unmap(&self) {
+        let process = Scheduler::current_process();
+        let process = process.lock();
+
+        // SAFETY: only userspace processes should have Vm mappings
+        let mut page_mapper = process.get_page_table().unwrap();
+        let mut frame_allocator = FRAME_ALLOCATOR.lock();
+
+        for page in (self.start..self.end()).step_by(Size4KiB::SIZE as usize) {
+            if let Some(phys_addr) = page_mapper.translate(page) {
+                unsafe {
+                    frame_allocator.deallocate_frame(PhysFrame::<Size4KiB>::containing_address(phys_addr));
+                }
+            }
+
+            page_mapper.unmap(page);
+        }
+    }
 }
 
 pub struct Vm {
-    entries: Vec<VmEntry>,
+    entries: BTreeMap<VirtAddr, VmEntry>,
 }
 
 impl Vm {
     pub const fn new() -> Self {
-        Self { entries: Vec::new() }
+        Self {
+            entries: BTreeMap::new(),
+        }
     }
 
-    pub fn add_entry(&mut self, start: VirtAddr, length: usize, prot: ProtectionFlags, flags: MapFlags) {
-        self.entries.push(VmEntry {
+    fn add_entry(&mut self, start: VirtAddr, length: usize, prot: ProtectionFlags, flags: MapFlags) {
+        let entry = VmEntry {
             start,
             length,
             prot,
             flags,
             file: None,
-        });
+        };
+
+        self.entries.insert(start, entry);
     }
 
+    pub fn is_available(&self, start: VirtAddr, length: usize) -> bool {
+        !self.entries.iter().any(|(_, entry)| {
+            entry.start < start && entry.end() + Size4KiB::SIZE > start
+                || start + length + Size4KiB::SIZE > entry.start
+                    && (start + length + Size4KiB::SIZE).as_u64() < Size4KiB::SIZE
+        })
+    }
+
+    pub fn create_entry_low(&mut self, length: usize, prot: ProtectionFlags, flags: MapFlags) -> VirtAddr {
+        self.create_entry_at(VirtAddr::new(PROCESS_START), length, prot, flags)
+    }
+
-    pub fn find_next_start_address(&self) -> VirtAddr {
-        let last_entry = self.entries.last();
+    pub fn create_entry_high(&mut self, length: usize, prot: ProtectionFlags, flags: MapFlags) -> VirtAddr {
+        let mut start_address = VirtAddr::new(PROCESS_END - length as u64);
 
+        loop {
+            if self.is_available(start_address, length) {
+                if start_address.as_u64() < PROCESS_START {
+                    panic!(
+                        "create_entry_high: {:x}(length = {}) is out of bounds",
+                        start_address, length
+                    );
+                }
+
+                self.add_entry(start_address, length, prot, flags);
+                return start_address;
+            }
+
-        if let Some(last_entry) = last_entry {
-            VirtAddr::new(align_up(last_entry.end().as_u64(), Size4KiB::SIZE))
-        } else {
-            VirtAddr::new(MMAP_START_ADDR as u64)
+            // NOTE: at the moment only a stack should be created at the high end of the process address space
+            start_address -= STACK_SIZE;
         }
     }

+    /// A new entry is created at the given address or higher
+    pub fn create_entry_at(
+        &mut self,
+        mut start: VirtAddr,
+        length: usize,
+        prot: ProtectionFlags,
+        flags: MapFlags,
+    ) -> VirtAddr {
+        if start.as_u64() + length as u64 > PROCESS_END {
+            panic!("create_entry_at: {:x}(length = {}) is out of bounds", start, length);
+        }
+
+        if !start.is_aligned(Size4KiB::SIZE) {
+            panic!("create_entry_at: {:x} is not aligned", start);
+        }
+
+        if start.as_u64() < PROCESS_START {
+            start = VirtAddr::new(PROCESS_START);
+        }
+
+        if self.is_available(start, length) {
+            self.add_entry(start, length, prot, flags);
+            return start;
+        }
+
+        let mut values_iter = self.entries.values();
+        let mut previous = values_iter.next().unwrap();
+        let current = values_iter.next();
+
+        if current.is_none() {
+            let new_start = previous.end() + Size4KiB::SIZE;
+            let new_start = new_start.align_up(Size4KiB::SIZE);
+
+            self.add_entry(new_start, length, prot, flags);
+            return new_start;
+        }
+
+        let mut current = current.unwrap();
+
+        loop {
+            if current.start - previous.end() >= length as u64 + 2 * Size4KiB::SIZE {
+                let new_start = previous.end() + Size4KiB::SIZE;
+                let new_start = new_start.align_up(Size4KiB::SIZE);
+
+                self.add_entry(new_start, length, prot, flags);
+                return new_start;
+            }
+
+            previous = current;
+            let current_opt = values_iter.next();
+
+            if current_opt.is_none() {
+                let new_start = previous.end() + Size4KiB::SIZE;
+                let new_start = new_start.align_up(Size4KiB::SIZE);
+
+                if new_start.as_u64() + length as u64 > PROCESS_END {
+                    panic!(
+                        "create_entry_at: {:x}(length = {}) is out of bounds! Vm space is exhausted",
+                        new_start, length
+                    );
+                }
+
+                self.add_entry(new_start, length, prot, flags);
+                return new_start;
+            }
+
+            current = current_opt.unwrap();
+        }
+    }
+
     pub fn get_entry_from_address(&self, addr: VirtAddr) -> Option<&VmEntry> {
         self.entries
             .iter()
-            .find(|entry| entry.start <= addr && entry.end() > addr)
+            .find(|(_, entry)| entry.start <= addr && entry.end() > addr)
+            .map(|(_, entry)| entry)
     }
 
     pub fn clean_up(&mut self) {
-        todo!("clean up all mappings and free memory")
-        // NOTE: don't forget to remove the entries from the vector
+        self.entries.values().for_each(|value| value.unmap());
+        self.entries.clear();
     }
 }
 
-pub fn ptflags_from_protflags(flags: ProtectionFlags) -> PageTableFlags {
+pub fn ptflags_from_protflags(flags: ProtectionFlags, user_accessible: bool) -> PageTableFlags {
     let mut new_flags = PageTableFlags::PRESENT;
 
-    if flags.contains(ProtectionFlags::READ) {
-        // TODO: how to handle this??
-        todo!("PageTableFlags::READ")
+    if user_accessible {
+        new_flags |= PageTableFlags::USER_ACCESSIBLE;
    }
 
+    if !flags.contains(ProtectionFlags::READ) {
+        // NOTE: it is not possible to remove read access from a page
+    }
+
     if flags.contains(ProtectionFlags::WRITE) {
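The rework swaps the append-only Vec for a BTreeMap keyed by start address, so entries stay sorted and create_entry_at can scan gaps in address order. A hedged sketch of how the three allocation entry points relate; the identifiers come from this diff, while the prot/flags values and the numeric hint are mine (assuming the bitflags-style ProtectionFlags its contains() calls suggest):

    let mut vm = Vm::new();
    let prot = ProtectionFlags::READ | ProtectionFlags::WRITE;
    let flags = MapFlags::ANONYMOUS;

    // low end: first fit at PROCESS_START or above
    let low = vm.create_entry_low(0x4000, prot, flags);

    // hinted: placed at 0x50_0000 if the range is free, otherwise at the
    // next page-aligned gap above it (panics if the hint is unaligned)
    let hinted = vm.create_entry_at(VirtAddr::new(0x50_0000), 0x2000, prot, flags);

    // high end: steps down from PROCESS_END in STACK_SIZE increments until
    // is_available() finds a slot -- used for thread stacks
    let stack = vm.create_entry_high(STACK_SIZE as usize, prot, flags);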
