From f398bfecba0a84aaa3f6e092fa7077112346b3c3 Mon Sep 17 00:00:00 2001
From: Andrew Fasano
Date: Tue, 20 Feb 2024 13:07:39 -0500
Subject: [PATCH] WIP mmap support for dyndev

---
 drivers/dyndev/dyndev_devfs.c | 203 +++++++++++-----------------------
 1 file changed, 66 insertions(+), 137 deletions(-)

diff --git a/drivers/dyndev/dyndev_devfs.c b/drivers/dyndev/dyndev_devfs.c
index 4eaf7791ddf638..2f5c41b767aa9e 100644
--- a/drivers/dyndev/dyndev_devfs.c
+++ b/drivers/dyndev/dyndev_devfs.c
@@ -43,7 +43,9 @@ static int dyndev_release(struct inode *inodep, struct file *filep) {
 	return 0;
 }
 
-static ssize_t dyndev_read(struct file *filep, char *buffer, size_t len, loff_t *offset) {
+
+
+static ssize_t _dyndev_read(struct file *filep, char *buffer, size_t len, loff_t *offset, bool kernel) {
 	char *full_path;
 	char *path_buffer;
 	ssize_t result;
@@ -66,7 +68,11 @@ static ssize_t dyndev_read(struct file *filep, char *buffer, size_t len, loff_t
 		return PTR_ERR(full_path);
 	}
 
-	result = hypervisor_read(full_path, buffer, len, offset);
+	if (kernel) {
+		result = hypervisor_read_kernel(full_path, buffer, len, offset);
+	} else {
+		result = hypervisor_read(full_path, buffer, len, offset);
+	}
 
 	// Free the temporary buffer
 	kfree(path_buffer);
@@ -74,6 +80,16 @@ static ssize_t dyndev_read(struct file *filep, char *buffer, size_t len, loff_t
 	return result;
 }
 
+static ssize_t dyndev_read(struct file *filep, char *buffer, size_t len, loff_t *offset) {
+	// Read into userspace buffer
+	return _dyndev_read(filep, buffer, len, offset, false);
+}
+
+static ssize_t dyndev_read_kernel(struct file *filep, char *buffer, size_t len, loff_t *offset) {
+	// Read into kernel space buffer (for mmap)
+	return _dyndev_read(filep, buffer, len, offset, true);
+}
+
 static ssize_t dyndev_write(struct file *filep, const char *buffer, size_t len, loff_t *offset) {
 	char *full_path;
 	char *path_buffer;
@@ -133,163 +149,76 @@ static long dyndev_ioctl(struct file *filep, unsigned int cmd, unsigned long arg
 	return hyper_op.rv; // Return the value fetched from the emulator
 }
 
-
-#if 0
-static void my_vm_open(struct vm_area_struct *vma) {
-	struct hyper_file_op* old_hyper_op = vma->vm_private_data;
-	//printk(KERN_INFO "dyndev open: virt %lx for hyper_file at %p\n", vma->vm_start, old_hyper_op);
-
-	// Increment reference count to prevent premature free in case of fork
-	if (old_hyper_op) {
-		atomic_inc(&old_hyper_op->refcount);
-	}
+static void dyndev_vma_close(struct vm_area_struct *vma)
+{
+	struct page *pages = vma->vm_private_data;
+	__free_pages(pages, get_order(vma->vm_end - vma->vm_start));
 }
 
-static void my_vm_close(struct vm_area_struct *vma) {
-	struct hyper_file_op* hyper_op = vma->vm_private_data;
-	printk(KERN_INFO "dyndev close: virt %lx for hyper_file at %p\n", vma->vm_start, hyper_op);
-
-	if (hyper_op && atomic_dec_and_test(&hyper_op->refcount)) {
-		kfree(hyper_op);
-		vma->vm_private_data = NULL;
-	}
-
-	#if 0
-	if (hyper_op && atomic_dec_and_test(&hyper_op->refcount)) {
-		// Execute the hypercall for write
-		//printk(KERN_INFO "\t: writing back to device %s\n", hyper_op->device_name);
-		//printk(KERN_INFO "\t: kbuf is at %p\n", hyper_op->args.read_args.buffer);
-
-		hyper_op->type = HYPER_WRITE;
-		sync_struct(hyper_op);
-		//printk(KERN_INFO "\t: rv is %ld\n", hyper_op->rv);
-
-		// Free the buffer and hyper op struct
-		if (hyper_op->args.read_args.buffer) {
-			//printk(KERN_INFO "\t: free_pages for kernel buf at %p\n", hyper_op->args.read_args.buffer);
-			free_pages((unsigned long)hyper_op->args.read_args.buffer, get_order(vma->vm_end - vma->vm_start));
-		}
-
-		//printk(KERN_INFO "\t: kfreeing hyper op struct %p\n", hyper_op);
-		kfree(hyper_op);
-
-		vma->vm_private_data = NULL;
-	}
-	#endif
-}
+static const struct vm_operations_struct dyndev_vm_ops = {
+	.close = dyndev_vma_close,
+};
 
-static int my_fault_handler(struct vm_area_struct *vma, struct vm_fault *vmf) {
-	struct page *page;
-	char *kernel_buffer;
-	struct hyper_file_op *hyper_op = vmf->vma->vm_private_data;
-	ssize_t ret;
-	unsigned long address = (unsigned long)vmf->address;
-	void *page_ptr;
-	bool is_write = vmf->flags & FAULT_FLAG_WRITE;
+static int dyndev_mmap(struct file *filp, struct vm_area_struct *vma) {
+	size_t len = vma->vm_end - vma->vm_start;
+	struct page *pages;
+	unsigned long num_pages;
+	unsigned long start = vma->vm_start;
+	char *buffer;
 	unsigned long pfn;
+	int i, ret;
 
-	printk(KERN_INFO "dyndev: page fault at address %lx. Write=%d\n", address, is_write);
+	// Calculate the number of pages
+	num_pages = (len + PAGE_SIZE - 1) / PAGE_SIZE;
 
-	// Allocate a temporary kernel buffer
-	kernel_buffer = (char *)kmalloc(PAGE_SIZE, GFP_KERNEL);
-	if (!kernel_buffer) {
-		return VM_FAULT_OOM;
-	}
-
-	if (is_write) {
-		// Handle write fault: Copy data from user space to kernel buffer
-		if (copy_from_user(kernel_buffer, (char *)vmf->address, PAGE_SIZE)) {
-			kfree(kernel_buffer);
-			return VM_FAULT_SIGBUS;
-		}
-
-		// TODO: Add your write handling logic here
-		hyper_op->type = HYPER_WRITE;
-		hyper_op->args.write_args.buffer = kernel_buffer;
-		sync_struct(hyper_op);
-	} else {
-		// Handle read fault: Populate kernel buffer with data for user space
-		if (copy_to_user((char *)vmf->address, kernel_buffer, PAGE_SIZE)) {
-			kfree(kernel_buffer);
-			return VM_FAULT_SIGBUS;
-		}
-		hyper_op->type = HYPER_READ;
-		hyper_op->args.read_args.buffer = kernel_buffer;
-		sync_struct(hyper_op);
-	}
-
-	// Allocate a new page and copy data to it
-	page = alloc_page(GFP_KERNEL);
-	if (!page) {
-		kfree(kernel_buffer);
-		return VM_FAULT_OOM;
-	}
-	page_ptr = kmap(page);
-	memcpy(page_ptr, kernel_buffer, PAGE_SIZE);
-	kunmap(page);
-
-	// Convert the page to a PFN
-	pfn = page_to_pfn(page);
+	// Allocate pages
+	pages = alloc_pages(GFP_KERNEL, get_order(len));
+	if (!pages)
+		return -ENOMEM;
 
-	// Unmap the page to ensure the next access traps
-	zap_vma_ptes(vma, address & PAGE_MASK, PAGE_SIZE);
+	// Get the buffer from the allocated pages
+	buffer = kmap(pages);
 
-	// Insert the page into the user space
-	ret = vm_insert_pfn(vma, address & PAGE_MASK, pfn);
-	if (ret) {
-		__free_page(page);
-		kfree(kernel_buffer);
+	// Perform the "read" operation to fill the buffer
+	// Note: Modify dyndev_read to work with this buffer or replicate its functionality here
+#if 0
+	ret = dyndev_read_kernel(filp, buffer, len, 0);
+	if (ret < 0) {
+		kunmap(pages);
+		__free_pages(pages, get_order(len));
 		return ret;
 	}
-
-	kfree(kernel_buffer);
-	return VM_FAULT_NOPAGE; // Indicate that the fault has been handled
-}
-
-
-static const struct vm_operations_struct my_vm_ops = {
-	.open = my_vm_open,
-	.close = my_vm_close,
-	.fault = my_fault_handler,
-};
-
-
-static int dyndev_mmap(struct file *filp, struct vm_area_struct *vma) {
-	struct hyper_file_op* hyper_op;
-	size_t len = vma->vm_end - vma->vm_start;
-
-	hyper_op = kmalloc(sizeof(struct hyper_file_op), GFP_KERNEL);
-	if (!hyper_op) {
-		pr_err("Failed to allocate memory for hyper_op\n");
-		return -ENOMEM;
+#endif
+	// Placeholder, place "foo" into buffer
+	strcpy(buffer, "hello_world_this_is_a_test\0");
+
+	// Now map each page to the user space
+	for (i = 0; i < num_pages; i++) {
+		pfn = page_to_pfn(&pages[i]);
+		if (remap_pfn_range(vma, start, pfn, PAGE_SIZE, vma->vm_page_prot)) {
+			kunmap(pages);
+			__free_pages(pages, get_order(len));
+			return -EAGAIN;
+		}
+		start += PAGE_SIZE;
 	}
-	atomic_set(&hyper_op->refcount, 1); // Initialize reference count
+	// Unmap the buffer and ensure it's not accessed beyond this point
+	kunmap(pages);
 
-	hyper_op->type = HYPER_READ; // Initialize for read, can be changed in fault handler
-	snprintf(hyper_op->device_name, 128, "/dev/%s", filp->f_path.dentry->d_iname);
-	hyper_op->args.read_args.length = len;
-	hyper_op->args.read_args.offset = 0;
-	//hyper_op->args.read_args.offset = vma->vm_pgoff << PAGE_SHIFT; // ???
+	// Store the pages pointer for cleanup
+	vma->vm_ops = &dyndev_vm_ops;
+	vma->vm_private_data = pages;
 
-	printk(KERN_ERR "dyndev: MMAPing device %s\n", hyper_op->device_name);
-
-	// Set up the VMA
-	vma->vm_ops = &my_vm_ops; // Set the custom vm_ops
-	vma->vm_private_data = hyper_op;
-	vma->vm_flags |= VM_MIXEDMAP; // Indicate custom page fault handling
-
-	// Do not map any pages here. Let the fault handler take care of it.
 	return 0;
 }
-#endif
 
 static struct file_operations fops = {
 	.owner = THIS_MODULE,
 	.open = dyndev_open,
 	.read = dyndev_read,
-	//.mmap = dyndev_mmap,
+	.mmap = dyndev_mmap,
 	.release = dyndev_release,
 	.write = dyndev_write,
	.unlocked_ioctl = dyndev_ioctl,
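
For reference, a minimal userspace sketch of how the new mmap path could be exercised; the node name /dev/dyndev-test below is only a placeholder for an actual dyndev-backed device node on the target system:

#include <fcntl.h>
#include <stdio.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
	long page_size = sysconf(_SC_PAGESIZE);
	int fd = open("/dev/dyndev-test", O_RDONLY);	/* placeholder node name */
	if (fd < 0) {
		perror("open");
		return 1;
	}

	/* dyndev_mmap() fills the backing pages before remap_pfn_range(),
	 * so the contents are visible as soon as the mapping exists. */
	char *buf = mmap(NULL, page_size, PROT_READ, MAP_SHARED, fd, 0);
	if (buf == MAP_FAILED) {
		perror("mmap");
		close(fd);
		return 1;
	}

	printf("mapped contents: %s\n", buf);

	munmap(buf, page_size);
	close(fd);
	return 0;
}

With the placeholder strcpy() in dyndev_mmap, the mapped buffer should read back "hello_world_this_is_a_test"; once the #if 0 block calling dyndev_read_kernel() is enabled, it would instead hold the hypervisor-provided device contents.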