Merge pull request #515 from vacantron/jit/cache
Drop unused structure "chain_entry_t"
jserv authored Nov 24, 2024
2 parents f31bc84 + 4b32e32 commit ce548ff
Showing 5 changed files with 148 additions and 123 deletions.
46 changes: 35 additions & 11 deletions src/cache.c
@@ -196,33 +196,57 @@ void *cache_get(const cache_t *cache, uint32_t key, bool update)

 void *cache_put(cache_t *cache, uint32_t key, void *value)
 {
-    void *delete_value = NULL;
     assert(cache->list_size <= cache->capacity);
-    /* check the cache is full or not before adding a new entry */
-    if (cache->list_size == cache->capacity) {
+
+    lfu_entry_t *replaced_entry = NULL, *entry;
+    hlist_for_each_entry (entry, &cache->map->ht_list_head[cache_hash(key)],
+                          ht_list) {
+        if (entry->key != key)
+            continue;
+        /* update the existing cache */
+        if (entry->value != value) {
+            replaced_entry = entry;
+            break;
+        }
+        /* should not put an identical block to cache */
+        assert(NULL);
+        __UNREACHABLE;
+    }
+
+    /* get the entry to be replaced if cache is full */
+    if (!replaced_entry && cache->list_size == cache->capacity) {
         for (int i = 0; i < THRESHOLD; i++) {
             if (list_empty(cache->lists[i]))
                 continue;
-            lfu_entry_t *delete_target =
+            replaced_entry =
                 list_last_entry(cache->lists[i], lfu_entry_t, list);
-            list_del_init(&delete_target->list);
-            hlist_del_init(&delete_target->ht_list);
-            delete_value = delete_target->value;
-            cache->list_size--;
-            mpool_free(cache_mp, delete_target);
             break;
         }
+        assert(replaced_entry);
+    }
+
+    void *replaced_value = NULL;
+    if (replaced_entry) {
+        replaced_value = replaced_entry->value;
+        list_del_init(&replaced_entry->list);
+        hlist_del_init(&replaced_entry->ht_list);
+        mpool_free(cache_mp, replaced_entry);
+        cache->list_size--;
     }
 
     lfu_entry_t *new_entry = mpool_alloc(cache_mp);
     INIT_LIST_HEAD(&new_entry->list);
     INIT_HLIST_NODE(&new_entry->ht_list);
     new_entry->key = key;
     new_entry->value = value;
     new_entry->frequency = 0;
     list_add(&new_entry->list, cache->lists[new_entry->frequency++]);
-    cache->list_size++;
     hlist_add_head(&new_entry->ht_list,
                    &cache->map->ht_list_head[cache_hash(key)]);
+    cache->list_size++;
 
     assert(cache->list_size <= cache->capacity);
-    return delete_value;
+    return replaced_value;
 }
 
 void cache_free(cache_t *cache)
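Note: the revised cache_put() contract is: if the key is already cached with a different value, that entry is replaced; otherwise, if the cache is full, the least-frequently-used entry is evicted; the displaced value (if any) is returned so the caller can tear it down. Below is a minimal, self-contained model of that contract, not the project's mpool/hlist-backed implementation; all toy_* names are hypothetical.

#include <assert.h>
#include <stddef.h>

#define TOY_CAPACITY 4

typedef struct {
    unsigned key;
    void *value;
    unsigned freq;
    int used;
} toy_entry_t;

static toy_entry_t toy_cache[TOY_CAPACITY];

/* Same contract as cache_put(): insert (key, value) and return the value
 * that was replaced or evicted, or NULL if nothing was displaced. */
static void *toy_cache_put(unsigned key, void *value)
{
    toy_entry_t *victim = NULL;
    int free_slot = -1;

    for (int i = 0; i < TOY_CAPACITY; i++) {
        if (!toy_cache[i].used) {
            free_slot = i;
            continue;
        }
        if (toy_cache[i].key == key) {
            /* an identical (key, value) pair must never be re-inserted */
            assert(toy_cache[i].value != value);
            victim = &toy_cache[i]; /* replace the stale entry */
            break;
        }
    }

    /* cache full and no stale entry: evict the least-frequently-used one */
    if (!victim && free_slot < 0) {
        victim = &toy_cache[0];
        for (int i = 1; i < TOY_CAPACITY; i++)
            if (toy_cache[i].freq < victim->freq)
                victim = &toy_cache[i];
    }

    void *replaced = NULL;
    if (victim)
        replaced = victim->value;
    else
        victim = &toy_cache[free_slot];
    victim->key = key;
    victim->value = value;
    victim->freq = 1;
    victim->used = 1;
    return replaced; /* caller releases whatever backs the old value */
}

int main(void)
{
    int blk1, blk2;
    assert(toy_cache_put(0x100, &blk1) == NULL);  /* fresh insert */
    assert(toy_cache_put(0x100, &blk2) == &blk1); /* same key: replaced */
    return 0;
}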
200 changes: 100 additions & 100 deletions src/emulate.c
@@ -533,8 +533,37 @@ FORCE_INLINE bool insn_is_unconditional_branch(uint8_t opcode)
     case rv_insn_cebreak:
 #endif
         return true;
+    default:
+        return false;
     }
-    return false;
 }
+
+FORCE_INLINE bool insn_is_direct_branch(uint8_t opcode)
+{
+    switch (opcode) {
+    case rv_insn_jal:
+#if RV32_HAS(EXT_C)
+    case rv_insn_cjal:
+    case rv_insn_cj:
+#endif
+        return true;
+    default:
+        return false;
+    }
+}
+
+FORCE_INLINE bool insn_is_indirect_branch(uint8_t opcode)
+{
+    switch (opcode) {
+    case rv_insn_jalr:
+#if RV32_HAS(EXT_C)
+    case rv_insn_cjalr:
+    case rv_insn_cjr:
+#endif
+        return true;
+    default:
+        return false;
+    }
+}
 
 static void block_translate(riscv_t *rv, block_t *block)
@@ -571,11 +600,7 @@ static void block_translate(riscv_t *rv, block_t *block)
 #endif
         /* stop on branch */
         if (insn_is_branch(ir->opcode)) {
-            if (ir->opcode == rv_insn_jalr
-#if RV32_HAS(EXT_C)
-                || ir->opcode == rv_insn_cjalr || ir->opcode == rv_insn_cjr
-#endif
-            ) {
+            if (insn_is_indirect_branch(ir->opcode)) {
                 ir->branch_table = calloc(1, sizeof(branch_history_table_t));
                 assert(ir->branch_table);
                 memset(ir->branch_table->PC, -1,
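Note: the branch history table allocated above lets an indirect branch remember recently observed target PCs so hot targets can be dispatched without a full lookup. A minimal standalone model of the lookup/update idea follows; the toy_bht_* names and table size are hypothetical and do not match the emulator's actual branch_history_table_t layout.

#include <stdint.h>
#include <string.h>

#define TOY_BHT_SIZE 8

typedef struct {
    uint32_t PC[TOY_BHT_SIZE];    /* recently seen targets, -1 = empty */
    uint32_t times[TOY_BHT_SIZE]; /* hit count per slot */
} toy_bht_t;

static void toy_bht_init(toy_bht_t *bht)
{
    /* mirrors the memset(ir->branch_table->PC, -1, ...) above */
    memset(bht->PC, -1, sizeof(bht->PC));
    memset(bht->times, 0, sizeof(bht->times));
}

/* record one observed target; returns its hit count so far */
static uint32_t toy_bht_update(toy_bht_t *bht, uint32_t target_pc)
{
    uint32_t coldest = 0;
    for (uint32_t i = 0; i < TOY_BHT_SIZE; i++) {
        if (bht->PC[i] == target_pc)
            return ++bht->times[i]; /* known target: bump its count */
        if (bht->times[i] < bht->times[coldest])
            coldest = i; /* remember the least-used slot */
    }
    /* unknown target: evict the least-used slot */
    bht->PC[coldest] = target_pc;
    bht->times[coldest] = 1;
    return 1;
}

int main(void)
{
    toy_bht_t bht;
    toy_bht_init(&bht);
    toy_bht_update(&bht, 0x100);
    return toy_bht_update(&bht, 0x100) == 2 ? 0 : 1;
}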
@@ -768,95 +793,89 @@ static block_t *block_find_or_translate(riscv_t *rv)
 #if !RV32_HAS(JIT)
     block_map_t *map = &rv->block_map;
     /* lookup the next block in the block map */
-    block_t *next = block_find(map, rv->PC);
+    block_t *next_blk = block_find(map, rv->PC);
 #else
-    /* lookup the next block in the block cache */
-    block_t *next = (block_t *) cache_get(rv->block_cache, rv->PC, true);
+    /*
+     * The function "cache_get()" gets the cached block by the given "key (PC)".
+     * In system simulation, the returned block might be dropped because it is
+     * not the one from the current process (by checking SATP CSR register).
+     */
+    block_t *next_blk = (block_t *) cache_get(rv->block_cache, rv->PC, true);
 #endif
 
-    if (!next) {
+    if (next_blk)
+        return next_blk;
+
 #if !RV32_HAS(JIT)
-        if (map->size * 1.25 > map->block_capacity) {
-            block_map_clear(rv);
-            prev = NULL;
-        }
+    /* clear block list if it is going to be filled */
+    if (map->size * 1.25 > map->block_capacity) {
+        block_map_clear(rv);
+        prev = NULL;
+    }
 #endif
-        /* allocate a new block */
-        next = block_alloc(rv);
-        block_translate(rv, next);
+    /* allocate a new block */
+    next_blk = block_alloc(rv);
 
-        optimize_constant(rv, next);
+    block_translate(rv, next_blk);
+
+    optimize_constant(rv, next_blk);
 #if RV32_HAS(GDBSTUB)
-        if (likely(!rv->debug_mode))
+    if (likely(!rv->debug_mode))
 #endif
-            /* macro operation fusion */
-            match_pattern(rv, next);
+        /* macro operation fusion */
+        match_pattern(rv, next_blk);
 
 #if !RV32_HAS(JIT)
-        /* insert the block into block map */
-        block_insert(&rv->block_map, next);
+    /* insert the block into block map */
+    block_insert(&rv->block_map, next_blk);
 #else
-        /* insert the block into block cache */
-        block_t *delete_target = cache_put(rv->block_cache, rv->PC, &(*next));
-        if (delete_target) {
-            if (prev == delete_target)
-                prev = NULL;
-            chain_entry_t *entry, *safe;
-            /* correctly remove deleted block from its chained block */
-            rv_insn_t *taken = delete_target->ir_tail->branch_taken,
-                      *untaken = delete_target->ir_tail->branch_untaken;
-            if (taken && taken->pc != delete_target->pc_start) {
-                block_t *target = cache_get(rv->block_cache, taken->pc, false);
-                bool flag = false;
-                list_for_each_entry_safe (entry, safe, &target->list, list) {
-                    if (entry->block == delete_target) {
-                        list_del_init(&entry->list);
-                        mpool_free(rv->chain_entry_mp, entry);
-                        flag = true;
-                    }
-                }
-                assert(flag);
-            }
-            if (untaken && untaken->pc != delete_target->pc_start) {
-                block_t *target =
-                    cache_get(rv->block_cache, untaken->pc, false);
-                assert(target);
-                bool flag = false;
-                list_for_each_entry_safe (entry, safe, &target->list, list) {
-                    if (entry->block == delete_target) {
-                        list_del_init(&entry->list);
-                        mpool_free(rv->chain_entry_mp, entry);
-                        flag = true;
-                    }
-                }
-                assert(flag);
-            }
-            /* correctly remove deleted block from the block chained to it */
-            list_for_each_entry_safe (entry, safe, &delete_target->list, list) {
-                if (entry->block == delete_target)
-                    continue;
-                rv_insn_t *target = entry->block->ir_tail;
-                if (target->branch_taken == delete_target->ir_head)
-                    target->branch_taken = NULL;
-                else if (target->branch_untaken == delete_target->ir_head)
-                    target->branch_untaken = NULL;
-                mpool_free(rv->chain_entry_mp, entry);
-            }
-            /* free deleted block */
-            uint32_t idx;
-            rv_insn_t *ir, *next;
-            for (idx = 0, ir = delete_target->ir_head;
-                 idx < delete_target->n_insn; idx++, ir = next) {
-                free(ir->fuse);
-                next = ir->next;
-                mpool_free(rv->block_ir_mp, ir);
-            }
-            mpool_free(rv->block_mp, delete_target);
-        }
-#endif
-    }
+    list_add(&next_blk->list, &rv->block_list);
+
+    /* insert the block into block cache */
+    block_t *replaced_blk = cache_put(rv->block_cache, rv->PC, &(*next_blk));
+
+    if (!replaced_blk)
+        return next_blk;
+
+    list_del_init(&replaced_blk->list);
+
+    if (prev == replaced_blk)
+        prev = NULL;
+
+    /* remove the connection from parents */
+    rv_insn_t *replaced_blk_entry = replaced_blk->ir_head;
+
+    /* TODO: record parents of each block to avoid traversing all blocks */
+    block_t *entry;
+    list_for_each_entry (entry, &rv->block_list, list) {
+        rv_insn_t *taken = entry->ir_tail->branch_taken,
+                  *untaken = entry->ir_tail->branch_untaken;
+
+        if (taken == replaced_blk_entry) {
+            entry->ir_tail->branch_taken = NULL;
+        }
+        if (untaken == replaced_blk_entry) {
+            entry->ir_tail->branch_untaken = NULL;
+        }
+    }
+
+    /* free IRs in replaced block */
+    for (rv_insn_t *ir = replaced_blk->ir_head, *next_ir; ir != NULL;
+         ir = next_ir) {
+        next_ir = ir->next;
+
+        if (ir->fuse)
+            free(ir->fuse);
+
+        mpool_free(rv->block_ir_mp, ir);
+    }
 
-    return next;
+    mpool_free(rv->block_mp, replaced_blk);
+#endif
+
+    assert(next_blk);
+    return next_blk;
 }
 
 #if RV32_HAS(JIT)
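Note: the replacement path above maintains one invariant: before a block is freed, every surviving block that chains to it must have its successor pointer cleared, or the dispatcher would later follow a dangling pointer. Below is a minimal standalone model of that invariant; the toy_* names are hypothetical, and the real code walks the intrusive rv->block_list rather than a hand-rolled singly linked list.

#include <assert.h>
#include <stddef.h>

typedef struct toy_block {
    const char *name;
    struct toy_block *branch_taken;   /* chained successor, or NULL */
    struct toy_block *branch_untaken;
    struct toy_block *next;           /* singly linked "block list" */
} toy_block_t;

/* clear every reference to 'victim' held by blocks on the list */
static void toy_unlink_parents(toy_block_t *list, const toy_block_t *victim)
{
    for (toy_block_t *b = list; b; b = b->next) {
        if (b->branch_taken == victim)
            b->branch_taken = NULL; /* re-resolved on next execution */
        if (b->branch_untaken == victim)
            b->branch_untaken = NULL;
    }
}

int main(void)
{
    toy_block_t a = {"a", NULL, NULL, NULL};
    toy_block_t b = {"b", NULL, NULL, &a};
    a.branch_taken = &b; /* a chains to b */

    /* evicting b: unlink it from all parents before freeing it */
    toy_unlink_parents(&b, &b);
    assert(a.branch_taken == NULL);
    return 0;
}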
@@ -918,31 +937,12 @@ void rv_step(void *arg)
         if (!insn_is_unconditional_branch(last_ir->opcode)) {
             if (is_branch_taken && !last_ir->branch_taken) {
                 last_ir->branch_taken = block->ir_head;
-#if RV32_HAS(JIT)
-                chain_entry_t *new_entry = mpool_alloc(rv->chain_entry_mp);
-                new_entry->block = prev;
-                list_add(&new_entry->list, &block->list);
-#endif
             } else if (!is_branch_taken && !last_ir->branch_untaken) {
                 last_ir->branch_untaken = block->ir_head;
-#if RV32_HAS(JIT)
-                chain_entry_t *new_entry = mpool_alloc(rv->chain_entry_mp);
-                new_entry->block = prev;
-                list_add(&new_entry->list, &block->list);
-#endif
             }
-        } else if (IF_insn(last_ir, jal)
-#if RV32_HAS(EXT_C)
-                   || IF_insn(last_ir, cj) || IF_insn(last_ir, cjal)
-#endif
-        ) {
+        } else if (insn_is_direct_branch(last_ir->opcode)) {
             if (!last_ir->branch_taken) {
                 last_ir->branch_taken = block->ir_head;
-#if RV32_HAS(JIT)
-                chain_entry_t *new_entry = mpool_alloc(rv->chain_entry_mp);
-                new_entry->block = prev;
-                list_add(&new_entry->list, &block->list);
-#endif
             }
         }
     }
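Note: with chain_entry_t gone, block chaining reduces to patching the tail instruction's successor pointers the first time each branch outcome is observed, so later dispatches skip the block lookup. A minimal standalone model of this lazy chaining, with hypothetical toy_* names:

#include <assert.h>
#include <stdbool.h>
#include <stddef.h>

typedef struct toy_ir {
    struct toy_ir *branch_taken;   /* lazily chained successors */
    struct toy_ir *branch_untaken;
} toy_ir_t;

/* after executing a conditional branch at the end of a block, remember
 * which block came next so later dispatches skip the block lookup */
static void toy_chain(toy_ir_t *last_ir, bool taken, toy_ir_t *next_head)
{
    if (taken && !last_ir->branch_taken)
        last_ir->branch_taken = next_head;
    else if (!taken && !last_ir->branch_untaken)
        last_ir->branch_untaken = next_head;
}

int main(void)
{
    toy_ir_t branch = {NULL, NULL}, then_blk, else_blk;
    toy_chain(&branch, true, &then_blk);
    toy_chain(&branch, false, &else_blk);
    assert(branch.branch_taken == &then_blk);
    assert(branch.branch_untaken == &else_blk);
    return 0;
}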
5 changes: 1 addition & 4 deletions src/riscv.c
@@ -315,9 +315,7 @@ riscv_t *rv_create(riscv_user_t rv_attr)
     /* initialize the block map */
     block_map_init(&rv->block_map, BLOCK_MAP_CAPACITY_BITS);
 #else
-    rv->chain_entry_mp =
-        mpool_create(sizeof(chain_entry_t) << BLOCK_IR_MAP_CAPACITY_BITS,
-                     sizeof(chain_entry_t));
+    INIT_LIST_HEAD(&rv->block_list);
     rv->jit_state = jit_state_init(CODE_CACHE_SIZE);
     rv->block_cache = cache_create(BLOCK_MAP_CAPACITY_BITS);
     assert(rv->block_cache);
@@ -426,7 +424,6 @@ void rv_delete(riscv_t *rv)
     pthread_mutex_destroy(&rv->wait_queue_lock);
     jit_cache_exit(rv->jit_cache);
 #endif
-    mpool_destroy(rv->chain_entry_mp);
     jit_state_exit(rv->jit_state);
     cache_free(rv->block_cache);
 #endif
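Note: INIT_LIST_HEAD() makes rv->block_list an empty circular list whose head points at itself, so the traversal and deletion in block_find_or_translate() need no NULL checks. A minimal sketch of that invariant, assuming the usual self-linked sentinel definition (the project's actual definitions live in src/utils.h):

#include <assert.h>

struct list_head {
    struct list_head *prev, *next;
};

static inline void INIT_LIST_HEAD(struct list_head *head)
{
    head->prev = head->next = head; /* empty list: head is self-linked */
}

static inline int list_empty(const struct list_head *head)
{
    return head->next == head;
}

int main(void)
{
    struct list_head block_list;
    INIT_LIST_HEAD(&block_list);
    assert(list_empty(&block_list)); /* safe to iterate immediately */
    return 0;
}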
10 changes: 2 additions & 8 deletions src/riscv_private.h
@@ -100,18 +100,12 @@ typedef struct block {
 #endif
 } block_t;
 
-#if RV32_HAS(JIT)
-typedef struct {
-    block_t *block;
-    struct list_head list;
-} chain_entry_t;
-#if RV32_HAS(T2C)
+#if RV32_HAS(JIT) && RV32_HAS(T2C)
 typedef struct {
     block_t *block;
     struct list_head list;
 } queue_entry_t;
 #endif
-#endif
 
 typedef struct {
     uint32_t block_capacity; /**< max number of entries in the block map */
@@ -178,7 +172,7 @@ struct riscv_internal {
     block_map_t block_map; /**< basic block map */
 #else
     struct cache *block_cache;
-    struct mpool *chain_entry_mp;
+    struct list_head block_list; /**< list of all translated blocks */
 #if RV32_HAS(T2C)
     struct list_head wait_queue;
     pthread_mutex_t wait_queue_lock;
10 changes: 10 additions & 0 deletions src/utils.h
@@ -106,12 +106,22 @@ static inline void list_del_init(struct list_head *node)
     list_entry((head)->prev, type, member)
 
 #ifdef __HAVE_TYPEOF
+#define list_for_each_entry(entry, head, member)                            \
+    for (entry = list_entry((head)->next, __typeof__(*entry), member);      \
+         &entry->member != (head);                                          \
+         entry = list_entry(entry->member.next, __typeof__(*entry), member))
+
 #define list_for_each_entry_safe(entry, safe, head, member)                 \
     for (entry = list_entry((head)->next, __typeof__(*entry), member),      \
          safe = list_entry(entry->member.next, __typeof__(*entry), member); \
         &entry->member != (head); entry = safe,                             \
         safe = list_entry(safe->member.next, __typeof__(*entry), member))
 #else
+#define list_for_each_entry(entry, head, member, type)   \
+    for (entry = list_entry((head)->next, type, member); \
+         &entry->member != (head);                       \
+         entry = list_entry(entry->member.next, type, member))
+
 #define list_for_each_entry_safe(entry, safe, head, member, type) \
     for (entry = list_entry((head)->next, type, member),          \
          safe = list_entry(entry->member.next, type, member);     \
         &entry->member != (head); entry = safe,                   \
         safe = list_entry(safe->member.next, type, member))
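Note: the newly added list_for_each_entry follows the Linux-kernel idiom: it walks an intrusive doubly linked list and recovers the enclosing struct from each embedded list_head, which is how block_find_or_translate() scans rv->block_list. A minimal, self-contained usage sketch follows; the toy_block_t type is hypothetical, and just enough of the list primitives is re-declared to compile standalone.

#include <stddef.h>
#include <stdio.h>

struct list_head {
    struct list_head *prev, *next;
};

#define LIST_HEAD_INIT(name) {&(name), &(name)}

static inline void list_add(struct list_head *node, struct list_head *head)
{
    node->prev = head;
    node->next = head->next;
    head->next->prev = node;
    head->next = node;
}

#define list_entry(node, type, member) \
    ((type *) ((char *) (node) - offsetof(type, member)))

#define list_for_each_entry(entry, head, member)                       \
    for (entry = list_entry((head)->next, __typeof__(*entry), member); \
         &entry->member != (head);                                     \
         entry = list_entry(entry->member.next, __typeof__(*entry), member))

typedef struct {
    unsigned pc_start;
    struct list_head list; /* links this block into a block list */
} toy_block_t;

int main(void)
{
    struct list_head block_list = LIST_HEAD_INIT(block_list);
    toy_block_t a = {.pc_start = 0x100}, b = {.pc_start = 0x200};

    list_add(&a.list, &block_list);
    list_add(&b.list, &block_list);

    /* visits b then a: list_add pushes at the head */
    toy_block_t *entry;
    list_for_each_entry (entry, &block_list, list)
        printf("block at PC 0x%x\n", entry->pc_start);
    return 0;
}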
