Pagecache: use an objcache as page heap
In the pagecache code, allocating 4KB pages directly from the
linear backed heap creates excessive fragmentation, which in
low-memory conditions may cause other allocations in the kernel
(which typically use objcaches with a 2MB page size) to fail even
when pagecache drain requests are fully executed.
This commit addresses the fragmentation issue by changing the
pagecache code to use an objcache (with a 2MB page size, and with
the linear backed heap as parent heap) as page heap; this allows a
pagecache drain to release memory in large chunks that are then
available for other allocation requests throughout the kernel.
In addition, the pagecache completion heap is now drained if a
drain request could not be fully executed by draining the page
heap.
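
In code terms, the fix interposes an objcache between the pagecache and the linear backed heap: freed 4KB pages accumulate inside 2MB parent pages that a drain can hand back whole. A minimal sketch of the before/after wiring, using the allocate_objcache and cache_drain calls that appear in the diff below (the surrounding variables are illustrative, and the trailing boolean argument is copied from the diff without interpretation):

    /* Before: 4KB cache pages carved directly out of the linear backed heap,
       scattering small allocations across its 2MB regions */
    pc->contiguous = contiguous;

    /* After: an objcache that grows in 2MB parent pages sits in between */
    pc->contiguous = (heap)allocate_objcache(general, contiguous,
                                             PAGESIZE,     /* object size: one cache page */
                                             PAGESIZE_2M,  /* parent page size */
                                             true);

    /* Under memory pressure, release cached pages above a small retained floor;
       the objcache returns whole 2MB parent pages to the linear backed heap */
    u64 freed = cache_drain((caching_heap)pc->contiguous, drain_bytes,
                            PAGECACHE_PAGES_RETAIN * cache_pagesize(pc));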
francescolavra committed Jul 29, 2022 · commit 7fe44ea · parent 199bdab
Showing 2 changed files with 19 additions and 2 deletions.
src/config.h: 2 additions, 0 deletions
@@ -73,6 +73,8 @@
 /* Number of objects that should be retained in the cache when a cache drain is requested */
 #define NET_RX_BUFFERS_RETAIN 64
 #define STORAGE_REQUESTS_RETAIN 64
+#define PAGECACHE_PAGES_RETAIN 64
+#define PAGECACHE_COMPLETIONS_RETAIN 64
 
 /* must be large enough for vendor code that use malloc/free interface */
 #define MAX_MCACHE_ORDER 16
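
Read together with the drain calls in the pagecache.c hunk below, these constants are per-cache object counts that get converted into a byte floor left resident when a drain runs. Assuming the 4KB page size mentioned in the commit message, the two new constants translate roughly as follows (illustrative only):

    /* retain argument handed to cache_drain(), in bytes */
    PAGECACHE_PAGES_RETAIN * cache_pagesize(pc)                     /* 64 * 4KB = 256KB of cache pages kept */
    PAGECACHE_COMPLETIONS_RETAIN * sizeof(struct page_completion)   /* 64 completion objects kept */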
src/kernel/pagecache.c: 17 additions, 2 deletions
@@ -740,12 +740,23 @@ u64 pagecache_drain(u64 drain_bytes)
 {
     pagecache pc = global_pagecache;
     u64 pages = pad(drain_bytes, cache_pagesize(pc)) >> pc->page_order;
+    u64 drained = 0;
 
     pagecache_lock_state(pc);
-    u64 evicted = evict_pages_locked(pc, pages);
+    do {
+        u64 evicted = evict_pages_locked(pc, pages);
+        drained += cache_drain((caching_heap)pc->contiguous, drain_bytes - drained,
+                               PAGECACHE_PAGES_RETAIN * cache_pagesize(pc));
+        if (evicted < pages)
+            break;
+        pages *= 2;
+    } while (drained < drain_bytes);
     balance_page_lists_locked(pc);
     pagecache_unlock_state(pc);
-    return evicted << pc->page_order;
+    if (drained < drain_bytes)
+        drained += cache_drain((caching_heap)pc->completions, drain_bytes - drained,
+                               PAGECACHE_COMPLETIONS_RETAIN * sizeof(struct page_completion));
+    return drained;
 }
 
 /* TODO could encode completion to indicate completion on transition
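
As a hedged walk-through of the new loop, assume the 4KB cache page size from the commit message and a 1MB drain request issued by some low-memory path in the kernel (the caller shown is hypothetical):

    /* Illustrative only:
     *   pages = 1MB >> 12 = 256
     *   pass 1: evict up to 256 pages, then drain the page objcache down to the
     *           PAGECACHE_PAGES_RETAIN floor; stop if fewer than 256 pages could
     *           be evicted, since there is nothing further to reclaim.
     *   pass 2: otherwise double the target to 512 pages and repeat, until the
     *           drained byte count covers the request.
     * If the page heap alone cannot satisfy the request, the completion objcache
     * is drained as well before the total is returned. */
    u64 freed = pagecache_drain(1ull << 20);    /* hypothetical caller under memory pressure */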
@@ -1646,7 +1657,11 @@ void init_pagecache(heap general, heap contiguous, heap physical, u64 pagesize)
     pc->page_order = find_order(pagesize);
     assert(pagesize == U64_FROM_BIT(pc->page_order));
     pc->h = general;
+#ifdef KERNEL
+    pc->contiguous = (heap)allocate_objcache(general, contiguous, PAGESIZE, PAGESIZE_2M, true);
+#else
     pc->contiguous = contiguous;
+#endif
     pc->physical = physical;
     pc->zero_page = allocate_zero(contiguous, pagesize);
     assert(pc->zero_page != INVALID_ADDRESS);
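
After this change, a KERNEL build routes every cache page allocation through the objcache, which pulls 2MB parent pages from the linear backed heap, while non-KERNEL builds keep using the supplied contiguous heap directly. A rough sketch of the resulting allocation path, using the generic allocate_u64/deallocate_u64 heap helpers for illustration (the pagecache's actual call sites may differ):

    /* Illustrative: a 4KB cache page now comes out of a 2MB objcache region */
    u64 kvirt = allocate_u64(pc->contiguous, cache_pagesize(pc));
    /* ... page is filled and used by the cache ... */
    deallocate_u64(pc->contiguous, kvirt, cache_pagesize(pc));
    /* freed pages sit in the objcache until a drain returns whole 2MB parent
       pages to the linear backed heap */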
