Skip to content

Commit

Permalink
Speedup LRU by using the faster timestamp API (#3990)
Browse files Browse the repository at this point in the history
* Speedup LRU by using the faster timestamp API

Signed-off-by: Alan Jowett <[email protected]>

* PR feedback

Signed-off-by: Alan Jowett <[email protected]>

* PR feedback

Signed-off-by: Alan Jowett <[email protected]>

---------

Signed-off-by: Alan Jowett <[email protected]>
Co-authored-by: Alan Jowett <[email protected]>
  • Loading branch information
Alan-Jowett and Alan Jowett authored Nov 13, 2024
1 parent f1b2c0f commit 0a7fddd
Show file tree
Hide file tree
Showing 6 changed files with 41 additions and 15 deletions.
8 changes: 4 additions & 4 deletions libs/execution_context/ebpf_core.c
Original file line number Diff line number Diff line change
Expand Up @@ -2178,17 +2178,17 @@ _ebpf_core_random_uint32()
static uint64_t
_ebpf_core_get_time_since_boot_ns()
{
    // Wall-clock-style boot time for bpf_ktime_get_boot_ns:
    // ebpf_query_time_since_boot_precise returns time elapsed since
    // boot in units of 100 ns; scale to nanoseconds.
    // 'true' => include time the system spent suspended.
    return ebpf_query_time_since_boot_precise(true) * EBPF_NS_PER_FILETIME;
}

static uint64_t
_ebpf_core_get_time_ns()
{
    // Boot time excluding suspend for bpf_ktime_get_ns:
    // ebpf_query_time_since_boot_precise returns time elapsed since
    // boot in units of 100 ns; scale to nanoseconds.
    // 'false' => exclude time the system spent suspended.
    return ebpf_query_time_since_boot_precise(false) * EBPF_NS_PER_FILETIME;
}

static uint64_t
Expand Down
8 changes: 5 additions & 3 deletions libs/execution_context/ebpf_maps.c
Original file line number Diff line number Diff line change
Expand Up @@ -166,6 +166,8 @@ __declspec(align(EBPF_CACHE_LINE_SIZE)) typedef struct _ebpf_lru_partition
size_t hot_list_limit; //< Maximum size of the hot list.
} ebpf_lru_partition_t;

// Each per-CPU LRU partition must occupy a whole number of cache lines so
// adjacent partitions never share a line (avoids false sharing).
static_assert(
    sizeof(ebpf_lru_partition_t) % EBPF_CACHE_LINE_SIZE == 0, "ebpf_lru_partition_t is not cache aligned.");

/**
* @brief The map definition for an LRU map.
*/
Expand Down Expand Up @@ -1203,15 +1205,15 @@ _insert_into_hot_list(_Inout_ ebpf_core_lru_map_t* map, size_t partition, _Inout
switch (key_state) {
case EBPF_LRU_KEY_UNINITIALIZED:
EBPF_LRU_ENTRY_GENERATION_PTR(map, entry)[partition] = map->partitions[partition].current_generation;
EBPF_LRU_ENTRY_LAST_USED_TIME_PTR(map, entry)[partition] = ebpf_query_time_since_boot(false);
EBPF_LRU_ENTRY_LAST_USED_TIME_PTR(map, entry)[partition] = ebpf_query_time_since_boot_approximate(false);
ebpf_list_insert_tail(
&map->partitions[partition].hot_list, &EBPF_LRU_ENTRY_LIST_ENTRY_PTR(map, entry)[partition]);
map->partitions[partition].hot_list_size++;
break;
case EBPF_LRU_KEY_COLD:
// Remove from cold list.
EBPF_LRU_ENTRY_GENERATION_PTR(map, entry)[partition] = map->partitions[partition].current_generation;
EBPF_LRU_ENTRY_LAST_USED_TIME_PTR(map, entry)[partition] = ebpf_query_time_since_boot(false);
EBPF_LRU_ENTRY_LAST_USED_TIME_PTR(map, entry)[partition] = ebpf_query_time_since_boot_approximate(false);
ebpf_list_remove_entry(&EBPF_LRU_ENTRY_LIST_ENTRY_PTR(map, entry)[partition]);
ebpf_list_insert_tail(
&map->partitions[partition].hot_list, &EBPF_LRU_ENTRY_LIST_ENTRY_PTR(map, entry)[partition]);
Expand Down Expand Up @@ -1255,7 +1257,7 @@ _initialize_lru_entry(
// Only insert into the current partition's hot list.
ebpf_lock_state_t state = ebpf_lock_lock(&map->partitions[partition].lock);
EBPF_LRU_ENTRY_GENERATION_PTR(map, entry)[partition] = map->partitions[partition].current_generation;
EBPF_LRU_ENTRY_LAST_USED_TIME_PTR(map, entry)[partition] = ebpf_query_time_since_boot(false);
EBPF_LRU_ENTRY_LAST_USED_TIME_PTR(map, entry)[partition] = ebpf_query_time_since_boot_approximate(false);
ebpf_list_insert_tail(&map->partitions[partition].hot_list, &EBPF_LRU_ENTRY_LIST_ENTRY_PTR(map, entry)[partition]);
map->partitions[partition].hot_list_size++;

Expand Down
8 changes: 4 additions & 4 deletions libs/execution_context/ebpf_program.c
Original file line number Diff line number Diff line change
Expand Up @@ -2435,7 +2435,7 @@ _ebpf_program_test_run_work_item(_In_ cxplat_preemptible_work_item_t* work_item,
state_stored = true;
}

uint64_t start_time = ebpf_query_time_since_boot(false);
uint64_t start_time = ebpf_query_time_since_boot_precise(false);
// Use a counter instead of performing a modulus operation to determine when to start a new epoch.
// This is because the modulus operation is expensive and we want to minimize the overhead of
// the test run.
Expand All @@ -2448,7 +2448,7 @@ _ebpf_program_test_run_work_item(_In_ cxplat_preemptible_work_item_t* work_item,
ebpf_epoch_exit(&epoch_state);
if (ebpf_should_yield_processor()) {
// Compute the elapsed time since the last yield.
end_time = ebpf_query_time_since_boot(false);
end_time = ebpf_query_time_since_boot_precise(false);

// Add the elapsed time to the cumulative time.
cumulative_time += end_time - start_time;
Expand All @@ -2460,7 +2460,7 @@ _ebpf_program_test_run_work_item(_In_ cxplat_preemptible_work_item_t* work_item,
old_irql = ebpf_raise_irql(context->required_irql);

// Reset the start time.
start_time = ebpf_query_time_since_boot(false);
start_time = ebpf_query_time_since_boot_precise(false);
}
ebpf_epoch_enter(&epoch_state);
}
Expand All @@ -2470,7 +2470,7 @@ _ebpf_program_test_run_work_item(_In_ cxplat_preemptible_work_item_t* work_item,
break;
}
}
end_time = ebpf_query_time_since_boot(false);
end_time = ebpf_query_time_since_boot_precise(false);

cumulative_time += end_time - start_time;

Expand Down
13 changes: 12 additions & 1 deletion libs/runtime/ebpf_platform.c
Original file line number Diff line number Diff line change
Expand Up @@ -221,7 +221,7 @@ ebpf_allocate_process_state()
}

uint64_t
ebpf_query_time_since_boot(bool include_suspended_time)
ebpf_query_time_since_boot_precise(bool include_suspended_time)
{
uint64_t qpc_time;
if (include_suspended_time) {
Expand All @@ -237,6 +237,17 @@ ebpf_query_time_since_boot(bool include_suspended_time)
}
}

uint64_t
ebpf_query_time_since_boot_approximate(bool include_suspended_time)
{
    // Fast path used on hot LRU-map code paths: KeQueryInterruptTime is
    // cheaper than the precise query but the interrupt-time clock does not
    // advance while the system is suspended, so suspended time cannot be
    // included on this platform.
    // NOTE: parameter renamed from 'include_suspend_time' to match the
    // declaration in ebpf_platform.h and the _precise sibling.
    if (include_suspended_time) {
        ebpf_assert(!"Include suspend time not supported on this platform.");
        return 0;
    } else {
        // Returns time since boot in 100 ns units, per the header contract.
        return KeQueryInterruptTime();
    }
}

MDL*
ebpf_map_memory(size_t length)
{
Expand Down
15 changes: 14 additions & 1 deletion libs/runtime/ebpf_platform.h
Original file line number Diff line number Diff line change
Expand Up @@ -654,7 +654,20 @@ extern "C"
*/
EBPF_INLINE_HINT
uint64_t
ebpf_query_time_since_boot(bool include_suspended_time);
ebpf_query_time_since_boot_precise(bool include_suspended_time);

/**
* @brief Return time elapsed since boot in units of 100 nanoseconds.
* This function is faster than ebpf_query_time_since_boot_precise() but may not
* be as accurate.
*
* @param[in] include_suspended_time Include time the system spent in a suspended state.
*
* @return Time elapsed since boot in 100 nanosecond units.
*/
EBPF_INLINE_HINT
uint64_t
ebpf_query_time_since_boot_approximate(bool include_suspended_time);

/**
* @brief Affinitize the current thread to a specific CPU by index and return the old affinity.
Expand Down
4 changes: 2 additions & 2 deletions tests/performance/platform.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -46,7 +46,7 @@ _perf_bpf_ktime_get_boot_ns()
uint64_t time;
ebpf_epoch_state_t epoch_state;
ebpf_epoch_enter(&epoch_state);
time = ebpf_query_time_since_boot(true) * EBPF_NS_PER_FILETIME;
time = ebpf_query_time_since_boot_precise(true) * EBPF_NS_PER_FILETIME;
ebpf_epoch_exit(&epoch_state);
}

Expand All @@ -56,7 +56,7 @@ _perf_bpf_ktime_get_ns()
uint64_t time;
ebpf_epoch_state_t epoch_state;
ebpf_epoch_enter(&epoch_state);
time = ebpf_query_time_since_boot(false) * EBPF_NS_PER_FILETIME;
time = ebpf_query_time_since_boot_precise(false) * EBPF_NS_PER_FILETIME;
ebpf_epoch_exit(&epoch_state);
}

Expand Down

0 comments on commit 0a7fddd

Please sign in to comment.