implement master pool
radkesvat committed Aug 3, 2024
1 parent 99eceb4 commit 9f5c2e7
Showing 2 changed files with 147 additions and 0 deletions.
55 changes: 55 additions & 0 deletions ww/master_pool.c
@@ -0,0 +1,55 @@
#include "master_pool.h"

static void poolFirstCharge(master_pool_t *pool)
{
hhybridmutex_lock(&(pool->mutex));
pool->len = pool->cap / 2;
for (size_t i = 0; i < pool->len; i++)
{
pool->available[i] = pool->create_item_handle(pool);
}
hhybridmutex_unlock(&(pool->mutex));
}

master_pool_t *newMasterPoolWithCap(unsigned int pool_width, MasterPoolItemCreateHandle create_h,
MasterPoolItemDestroyHandle destroy_h)
{

pool_width = (max(1, pool_width) + 15) & ~0x0F;
// only half of the slots are pre-charged at startup; the other half stays empty
pool_width = 2 * pool_width;

const unsigned long container_len = pool_width * sizeof(master_pool_item_t *);

int64_t memsize = (int64_t) (sizeof(master_pool_t) + container_len);
// ensure there is enough slack to offset the allocation to a cache-line boundary
MUSTALIGN2(memsize + ((kCpuLineCacheSize + 1) / 2), kCpuLineCacheSize);
memsize = ALIGN2(memsize + ((kCpuLineCacheSize + 1) / 2), kCpuLineCacheSize);

// check for overflow
if (memsize < (int64_t) sizeof(master_pool_t))
{
fprintf(stderr, "master pool size out of range\n");
exit(1);
}

// allocate memory, placing master_pool_t at a cache-line-aligned address
uintptr_t ptr = (uintptr_t) globalMalloc(memsize);

MUSTALIGN2(ptr, kCpuLineCacheSize);

// align the pointer to a cache-line boundary
master_pool_t *pool_ptr = (master_pool_t *) ALIGN2(ptr, kCpuLineCacheSize); // NOLINT

#ifdef DEBUG
memset(pool_ptr, 0xEB, sizeof(master_pool_t) + container_len);
#endif

master_pool_t pool = {
.memptr = pool_ptr, .cap = pool_width, .create_item_handle = create_h, .destroy_item_handle = destroy_h};

memcpy(pool_ptr, &pool, sizeof(master_pool_t));
hhybridmutex_init(&(pool_ptr->mutex));
poolFirstCharge(pool_ptr);
return pool_ptr;
}
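
To make the sizing above concrete, here is a small sketch (not part of the commit) that walks through the same arithmetic for a requested width of 20, assuming 8-byte pointers; the max(1, ...) clamp is skipped because 20 is already positive.

#include <stdio.h>

int main(void)
{
    unsigned int pool_width = 20;

    // round up to a multiple of 16: (20 + 15) & ~0x0F  ->  32
    pool_width = (pool_width + 15) & ~0x0Fu;

    // double it so that half of the slots can be pre-charged at startup -> 64
    pool_width = 2 * pool_width;

    // 64 slots * 8 bytes per item pointer = 512 bytes for the pointer container;
    // newMasterPoolWithCap then adds sizeof(master_pool_t) and rounds the total
    // up to a multiple of the cache-line size before calling globalMalloc
    unsigned long container_len = pool_width * sizeof(void *);

    printf("slots = %u, container = %lu bytes\n", pool_width, container_len);
    return 0;
}
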
92 changes: 92 additions & 0 deletions ww/master_pool.h
@@ -0,0 +1,92 @@
#pragma once

#include "hmutex.h"
#include "utils/mathutils.h"
#include "ww.h"

struct master_pool_s;
typedef void master_pool_item_t;

typedef master_pool_item_t *(*MasterPoolItemCreateHandle)(struct master_pool_s *pool);
typedef void (*MasterPoolItemDestroyHandle)(struct master_pool_s *pool, master_pool_item_t *item);

/*
Do not read the pool's fields directly from the struct; it is a shared,
multi-threaded object. Go through the functions below instead.
*/
typedef struct master_pool_s
{
void *memptr;
hhybridmutex_t mutex;
MasterPoolItemCreateHandle create_item_handle;
MasterPoolItemDestroyHandle destroy_item_handle;
atomic_uint len;
unsigned int cap; // fixed
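// flexible array member: cap item-pointer slots allocated right after the struct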
void *available[];
} master_pool_t;

static inline void popMasterPoolItems(master_pool_t *pool, master_pool_item_t **iptr, unsigned int count)
{
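// fast path: a relaxed, unlocked peek; if the pool looks empty, skip the
// mutex entirely and create every item fresh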

if (atomic_load_explicit(&(pool->len), memory_order_relaxed) > 0)
{
hhybridmutex_lock(&(pool->mutex));
const unsigned int tmp_len = atomic_load_explicit(&(pool->len), memory_order_relaxed);
const unsigned int consumed = min(tmp_len, count);
if (consumed > 0)
{
atomic_fetch_sub_explicit(&(pool->len), consumed, memory_order_relaxed);
// hand out the `consumed` most recently returned items
const unsigned int pbase = (tmp_len - consumed);
for (unsigned int i = 0; i < consumed; i++)
{
iptr[i] = pool->available[pbase + i];
}
}
// create whatever the pool could not provide (all of it, if the pool
// drained between the unlocked check and taking the mutex)
for (unsigned int i = consumed; i < count; i++)
{
iptr[i] = pool->create_item_handle(pool);
}

hhybridmutex_unlock(&(pool->mutex));
return;
}
for (unsigned int i = 0; i < count; i++)
{
iptr[i] = pool->create_item_handle(pool);
}
}

static inline void reuseMasterPoolItems(master_pool_t *pool, master_pool_item_t **iptr, unsigned int count)
{
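// fast path: if the pool looks full under a relaxed, unlocked read, skip
// the mutex and destroy the items directly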
if (pool->cap - atomic_load_explicit(&(pool->len), memory_order_relaxed) == 0)
{
for (unsigned int i = 0; i < count; i++)
{
pool->destroy_item_handle(pool, iptr[i]);
}
return;
}
hhybridmutex_lock(&(pool->mutex));

const unsigned int tmp_len = atomic_load_explicit(&(pool->len), memory_order_relaxed);
const unsigned int consumed = min(pool->cap - tmp_len, count);

atomic_fetch_add_explicit(&(pool->len), consumed, memory_order_relaxed);

if (consumed > 0)
{
// push the accepted items back on top of the available stack
for (unsigned int i = 0; i < consumed; i++)
{
pool->available[i + tmp_len] = iptr[i];
}
}
// destroy whatever did not fit (all of it, if the pool filled up between
// the unlocked check and taking the mutex)
for (unsigned int i = consumed; i < count; i++)
{
pool->destroy_item_handle(pool, iptr[i]);
}
hhybridmutex_unlock(&(pool->mutex));
}

master_pool_t *newMasterPoolWithCap(unsigned int pool_width, MasterPoolItemCreateHandle create_h,
MasterPoolItemDestroyHandle destroy_h);
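
For context, here is a minimal usage sketch of the new API. It is not part of the commit: the handler names, the 64-byte payload, and the use of globalMalloc/globalFree as plain allocators are illustrative assumptions.

#include "master_pool.h"

// hypothetical item handlers; the pool only stores opaque pointers
static master_pool_item_t *createBuffer(struct master_pool_s *pool)
{
    (void) pool;
    return globalMalloc(64); // assumed to behave like malloc (see ww.h)
}

static void destroyBuffer(struct master_pool_s *pool, master_pool_item_t *item)
{
    (void) pool;
    globalFree(item); // assumed counterpart of globalMalloc
}

void example(void)
{
    // the requested width is rounded up and doubled internally,
    // and half of the resulting capacity is pre-charged with items
    master_pool_t *pool = newMasterPoolWithCap(32, createBuffer, destroyBuffer);

    master_pool_item_t *items[8];
    popMasterPoolItems(pool, items, 8);   // served from the pool, or created fresh

    // ... use the items ...

    reuseMasterPoolItems(pool, items, 8); // returned to the pool, or destroyed if full
}

Note that the commit adds no destructor for the pool itself, so the sketch never frees it.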
