Skip to content

Commit

Permalink
use treemap for vm
Browse files Browse the repository at this point in the history
  • Loading branch information
0x6D70 committed Jan 23, 2024
1 parent 89b77b6 commit 8e4e678
Show file tree
Hide file tree
Showing 4 changed files with 141 additions and 51 deletions.
5 changes: 2 additions & 3 deletions xernel/kernel/src/mem/mmap.rs
Original file line number Diff line number Diff line change
Expand Up @@ -30,8 +30,7 @@ pub fn mmap(

match flags {
MapFlags::ANONYMOUS => {
let start_address = process.vm().find_next_start_address();
process.vm().add_entry(start_address, len, prot, flags);
let start_address = process.vm().create_entry_at(addr, len, prot, flags);

Ok(start_address.as_u64() as isize)
}
Expand Down Expand Up @@ -60,7 +59,7 @@ pub fn handle_page_fault(addr: VirtAddr, error_code: PageFaultErrorCode) -> bool
let base_addr = addr.align_down(Size4KiB::SIZE);
let frame = FRAME_ALLOCATOR.lock().allocate_frame::<Size4KiB>().unwrap();

let pt_flags = ptflags_from_protflags(vm_entry.prot);
let pt_flags = ptflags_from_protflags(vm_entry.prot, true); // TODO: don't hardcode user_accessible
let mut pt = process.get_page_table().unwrap();

pt.map::<Size4KiB>(frame, Page::from_start_address(base_addr).unwrap(), pt_flags, true);
Expand Down
6 changes: 3 additions & 3 deletions xernel/kernel/src/mem/mod.rs
Original file line number Diff line number Diff line change
Expand Up @@ -13,12 +13,12 @@ pub static HIGHER_HALF_OFFSET: Once<u64> = Once::new();

pub const KERNEL_OFFSET: u64 = 0xffff_ffff_8000_0000;
pub const HEAP_START_ADDR: usize = 0xffff_9000_0000_0000;
// NOTE: there are 16 TB available for mmap until it overflows into the non-canonical address space
pub const MMAP_START_ADDR: usize = 0x0000_7000_0000_0000;

// NOTE: stack grows down
pub const KERNEL_THREAD_STACK_TOP: u64 = 0xffff_a000_0000_0000;
pub const USER_THREAD_STACK_TOP: u64 = 0x0000_ffff_ffff_f000;

pub const PROCESS_END: u64 = 0x0000_ffff_ffff_f000;
pub const PROCESS_START: u64 = 0x0000_0001_0000_0000;

pub const STACK_SIZE: u64 = 0x40000;
pub const FRAME_SIZE: u64 = 4096;
Expand Down
144 changes: 123 additions & 21 deletions xernel/kernel/src/mem/vm.rs
Original file line number Diff line number Diff line change
@@ -1,17 +1,18 @@
use alloc::vec::Vec;
use alloc::collections::BTreeMap;
use libxernel::syscall::{MapFlags, ProtectionFlags};
use x86_64::align_up;
use x86_64::structures::paging::PageTableFlags;
use x86_64::{
structures::paging::{PageSize, Size4KiB},
VirtAddr,
};

use super::MMAP_START_ADDR;
use crate::mem::PROCESS_END;

use super::{PROCESS_START, STACK_SIZE};

pub struct VmEntry {
start: VirtAddr,
length: usize,
pub start: VirtAddr,
pub length: usize,
pub prot: ProtectionFlags,
pub flags: MapFlags,
// TODO: add something to represent to which file this entry belongs to
Expand All @@ -25,38 +26,136 @@ impl VmEntry {
}

pub struct Vm {
entries: Vec<VmEntry>,
entries: BTreeMap<VirtAddr, VmEntry>,
}

impl Vm {
/// Creates an empty address-space description with no mappings.
pub const fn new() -> Self {
    Self {
        entries: BTreeMap::new(),
    }
}

pub fn add_entry(&mut self, start: VirtAddr, length: usize, prot: ProtectionFlags, flags: MapFlags) {
self.entries.push(VmEntry {
fn add_entry(&mut self, start: VirtAddr, length: usize, prot: ProtectionFlags, flags: MapFlags) {
let entry = VmEntry {
start,
length,
prot,
flags,
file: None,
});
};

self.entries.insert(start, entry);
}

/// Returns `true` when the range `[start, start + length)` — padded with one
/// guard page on each side — does not overlap any existing entry.
///
/// NOTE: the original body returned the `.any(overlap)` result directly, so
/// it reported "available" exactly when an overlap was found (and `false`
/// for an empty map), and its second clause compared the request end against
/// the constant `Size4KiB::SIZE` instead of the entry's end.
pub fn is_available(&self, start: VirtAddr, length: usize) -> bool {
    !self.entries.values().any(|entry| {
        // Two half-open ranges overlap iff each starts below the other's
        // end; the added guard page keeps mappings one page apart.
        start < entry.end() + Size4KiB::SIZE
            && entry.start < start + length as u64 + Size4KiB::SIZE
    })
}

pub fn find_next_start_address(&self) -> VirtAddr {
let last_entry = self.entries.last();
/// Creates a new mapping at the lowest free address in the process address
/// space, searching upwards from `PROCESS_START`.
/// Returns the start address of the created entry.
pub fn create_entry_low(&mut self, length: usize, prot: ProtectionFlags, flags: MapFlags) -> VirtAddr {
    self.create_entry_at(VirtAddr::new(PROCESS_START), length, prot, flags)
}

pub fn create_entry_high(&mut self, length: usize, prot: ProtectionFlags, flags: MapFlags) -> VirtAddr {
let mut start_address = VirtAddr::new(PROCESS_END - length as u64);

loop {
if self.is_available(start_address, length) {
if start_address.as_u64() < PROCESS_START {
panic!(
"create_entry_high: {:x}(length = {}) is out of bounds",
start_address, length
);
}

self.add_entry(start_address, length, prot, flags);
return start_address;
}

if let Some(last_entry) = last_entry {
VirtAddr::new(align_up(last_entry.end().as_u64(), Size4KiB::SIZE))
} else {
VirtAddr::new(MMAP_START_ADDR as u64)
// NOTE: at the moment only a stack should be create at the high end of the process address space
start_address -= STACK_SIZE;
}
}

/// A new entry is created at the given address or higher.
///
/// If `[start, start + length)` is free it is used directly; otherwise the
/// gaps between existing entries are scanned upwards, starting no lower
/// than `start`, and the first gap that fits (with one guard page on each
/// side) is taken.
///
/// # Panics
/// Panics when `start` is not page-aligned or when no fitting gap exists
/// below `PROCESS_END`.
pub fn create_entry_at(
    &mut self,
    mut start: VirtAddr,
    length: usize,
    prot: ProtectionFlags,
    flags: MapFlags,
) -> VirtAddr {
    if start.as_u64() + length as u64 > PROCESS_END {
        panic!("create_entry_at: {:x}(length = {}) is out of bounds", start, length);
    }

    if !start.is_aligned(Size4KiB::SIZE) {
        panic!("create_entry_at: {:x} is not aligned", start);
    }

    if start.as_u64() < PROCESS_START {
        start = VirtAddr::new(PROCESS_START);
    }

    if self.is_available(start, length) {
        self.add_entry(start, length, prot, flags);
        return start;
    }

    // The requested address is taken: walk the entries (BTreeMap iterates in
    // ascending start order) and advance the candidate past every entry that
    // conflicts with it, keeping one guard page of separation. Unlike a scan
    // of raw gaps, the candidate never drops below `start`, which keeps the
    // documented "at the given address or higher" contract.
    let mut candidate = start;

    for entry in self.entries.values() {
        if entry.end() + Size4KiB::SIZE <= candidate {
            // Entry lies entirely below the candidate (guard included).
            continue;
        }

        if candidate + length as u64 + Size4KiB::SIZE <= entry.start {
            // The gap before this entry fits the request plus guard page.
            break;
        }

        // Conflict: move the candidate just past this entry.
        candidate = (entry.end() + Size4KiB::SIZE).align_up(Size4KiB::SIZE);
    }

    // Single bounds check covers both the "gap found" and the "placed after
    // the last entry" paths (the old code missed it on the one-entry path).
    if candidate.as_u64() + length as u64 > PROCESS_END {
        panic!(
            "create_entry_at: {:x}(length = {}) is out of bounds! Vm space is exhausted",
            candidate, length
        );
    }

    self.add_entry(candidate, length, prot, flags);
    candidate
}

/// Returns the entry whose half-open range `[start, end)` contains `addr`,
/// or `None` when the address is unmapped.
pub fn get_entry_from_address(&self, addr: VirtAddr) -> Option<&VmEntry> {
    // `.values()` replaces the former `.iter().find(…).map(|(_, e)| e)`
    // pattern — the key is never used.
    self.entries
        .values()
        .find(|entry| entry.start <= addr && entry.end() > addr)
}

pub fn clean_up(&mut self) {
Expand All @@ -65,12 +164,15 @@ impl Vm {
}
}

pub fn ptflags_from_protflags(flags: ProtectionFlags) -> PageTableFlags {
pub fn ptflags_from_protflags(flags: ProtectionFlags, user_accessible: bool) -> PageTableFlags {
let mut new_flags = PageTableFlags::PRESENT;

if flags.contains(ProtectionFlags::READ) {
// TODO: how to handle this??
todo!("PageTableFlags::READ")
if user_accessible {
new_flags |= PageTableFlags::USER_ACCESSIBLE;
}

if !flags.contains(ProtectionFlags::READ) {
// NOTE: it is not possible to remove read access from a page
}

if flags.contains(ProtectionFlags::WRITE) {
Expand Down
37 changes: 13 additions & 24 deletions xernel/kernel/src/sched/process.rs
Original file line number Diff line number Diff line change
@@ -1,13 +1,14 @@
use alloc::sync::Weak;
use core::sync::atomic::{AtomicUsize, Ordering};
use libxernel::syscall::{MapFlags, ProtectionFlags};
use x86_64::structures::paging::{Page, PageSize, PageTableFlags, Size4KiB};
use x86_64::VirtAddr;

use crate::fs::file::File;
use crate::fs::vnode::VNode;
use crate::mem::frame::FRAME_ALLOCATOR;
use crate::mem::vm::Vm;
use crate::mem::{KERNEL_THREAD_STACK_TOP, STACK_SIZE, USER_THREAD_STACK_TOP};
use crate::mem::{KERNEL_THREAD_STACK_TOP, STACK_SIZE};
use crate::VFS;
use alloc::collections::BTreeMap;
use alloc::sync::Arc;
Expand All @@ -18,8 +19,6 @@ use libxernel::sync::{Once, Spinlock};
use crate::mem::paging::{Pagemap, KERNEL_PAGE_MAPPER};
use crate::sched::thread::Thread;

use libxernel::syscall::{MapFlags, ProtectionFlags};

/// Ongoing counter for the ProcessID
static PROCESS_ID_COUNTER: AtomicUsize = AtomicUsize::new(0);

Expand All @@ -34,7 +33,6 @@ pub struct Process {
pub threads: Vec<Arc<Spinlock<Thread>>>,
pub fds: BTreeMap<usize, File>,
pub kernel_thread_stack_top: usize,
pub user_thread_stack_top: usize,
pub thread_id_counter: usize,
pub vm: Vm,
pub cwd: Arc<Spinlock<VNode>>,
Expand All @@ -58,7 +56,6 @@ impl Process {
threads: Vec::new(),
fds: BTreeMap::new(),
kernel_thread_stack_top: KERNEL_THREAD_STACK_TOP as usize,
user_thread_stack_top: USER_THREAD_STACK_TOP as usize,
thread_id_counter: 0,
vm: Vm::new(),
cwd: VFS.lock().root_node(),
Expand All @@ -85,23 +82,22 @@ impl Process {
);
}

self.vm.add_entry(
VirtAddr::new(stack_bottom as u64),
STACK_SIZE as usize,
ProtectionFlags::READ | ProtectionFlags::WRITE,
MapFlags::ANONYMOUS,
);
// TODO: how to unmap this page later??
// We can't add it to Vm because it's not in the lower half of the address space

stack_top
}

pub fn new_user_stack(&mut self) -> usize {
let stack_top = self.user_thread_stack_top;
self.user_thread_stack_top -= STACK_SIZE as usize;
let stack_bottom = self.user_thread_stack_top;

// create guard page
self.user_thread_stack_top -= Size4KiB::SIZE as usize;
let stack_bottom = self
.vm
.create_entry_high(
STACK_SIZE as usize,
ProtectionFlags::READ | ProtectionFlags::WRITE,
MapFlags::ANONYMOUS,
)
.as_u64() as usize;
let stack_top = STACK_SIZE as usize + stack_bottom;

for addr in (stack_bottom..stack_top).step_by(Size4KiB::SIZE as usize) {
let phys_page = FRAME_ALLOCATOR.lock().allocate_frame::<Size4KiB>().unwrap();
Expand All @@ -118,13 +114,6 @@ impl Process {
);
}

self.vm.add_entry(
VirtAddr::new(stack_bottom as u64),
STACK_SIZE as usize,
ProtectionFlags::READ | ProtectionFlags::WRITE,
MapFlags::ANONYMOUS,
);

stack_top
}

Expand Down

0 comments on commit 8e4e678

Please sign in to comment.