From adeaa79e2778fa3b0feaef70730e13d2749f8604 Mon Sep 17 00:00:00 2001
From: Kai Vehmanen
Date: Mon, 2 Oct 2023 13:52:16 +0300
Subject: [PATCH] west: test Zephyr PR 63370

Modify SOF code to adapt to interface changes in Zephyr commit
baea37aeb431 ("kernel: Re-factor sys_mem_blocks definition")

Signed-off-by: Kai Vehmanen
---
 west.yml                |  2 +-
 zephyr/lib/regions_mm.c | 16 ++++++++--------
 2 files changed, 9 insertions(+), 9 deletions(-)

diff --git a/west.yml b/west.yml
index 8543b993e2b1..a13335f662c6 100644
--- a/west.yml
+++ b/west.yml
@@ -45,7 +45,7 @@ manifest:
 
   - name: zephyr
     repo-path: zephyr
-    revision: 492517b918d267f553688cd6b9d59b92ffc10f91
+    revision: pull/63370/head
    remote: zephyrproject
 
 # Import some projects listed in zephyr/west.yml@revision
diff --git a/zephyr/lib/regions_mm.c b/zephyr/lib/regions_mm.c
index feb1f55d48a4..dbb1aa087b26 100644
--- a/zephyr/lib/regions_mm.c
+++ b/zephyr/lib/regions_mm.c
@@ -132,8 +132,8 @@ struct vmh_heap *vmh_init_heap(const struct vmh_heap_config *cfg,
 		new_heap->physical_blocks_allocators[i] = new_allocator;
 
 		/* Fill allocators data based on config and virtual region data */
-		new_allocator->num_blocks = cfg->block_bundles_table[i].number_of_blocks;
-		new_allocator->blk_sz_shift = ilog2(cfg->block_bundles_table[i].block_size);
+		new_allocator->info.num_blocks = cfg->block_bundles_table[i].number_of_blocks;
+		new_allocator->info.blk_sz_shift = ilog2(cfg->block_bundles_table[i].block_size);
 		new_allocator->buffer = (uint8_t *)new_heap->virtual_region->addr + offset;
 
 		/* Create bit array that is a part of mem_block kept as a ptr */
@@ -270,11 +270,11 @@ void *vmh_alloc(struct vmh_heap *heap, uint32_t alloc_size)
 		 * mem_block.
 		 */
 		block_size =
-			1 << heap->physical_blocks_allocators[mem_block_iterator]->blk_sz_shift;
+			1 << heap->physical_blocks_allocators[mem_block_iterator]->info.blk_sz_shift;
 		block_count = SOF_DIV_ROUND_UP((uint64_t)alloc_size, (uint64_t)block_size);
 
 		if (block_count >
-			heap->physical_blocks_allocators[mem_block_iterator]->num_blocks)
+			heap->physical_blocks_allocators[mem_block_iterator]->info.num_blocks)
 			continue;
 		/* Try span alloc on first available mem_block for non span
 		 * check if block size is sufficient.
@@ -455,7 +455,7 @@ int vmh_free_heap(struct vmh_heap *heap)
 		if (!heap->physical_blocks_allocators[i])
 			continue;
 		if (!sys_bitarray_is_region_cleared(heap->physical_blocks_allocators[i]->bitmap,
-						    heap->physical_blocks_allocators[i]->num_blocks, 0))
+						    heap->physical_blocks_allocators[i]->info.num_blocks, 0))
 			return -ENOTEMPTY;
 	}
 
@@ -503,13 +503,13 @@ int vmh_free(struct vmh_heap *heap, void *ptr)
 	     mem_block_iter < MAX_MEMORY_ALLOCATORS_COUNT;
 	     mem_block_iter++) {
 		block_size =
-			1 << heap->physical_blocks_allocators[mem_block_iter]->blk_sz_shift;
+			1 << heap->physical_blocks_allocators[mem_block_iter]->info.blk_sz_shift;
 
 		if (vmh_is_ptr_in_memory_range((uintptr_t)ptr,
 					       (uintptr_t)heap->physical_blocks_allocators
 							[mem_block_iter]->buffer,
 					       heap->physical_blocks_allocators
-							[mem_block_iter]->num_blocks * block_size)) {
+							[mem_block_iter]->info.num_blocks * block_size)) {
 			ptr_range_found = true;
 			break;
 		}
@@ -556,7 +556,7 @@ int vmh_free(struct vmh_heap *heap, void *ptr)
 	 */
 	size_t bits_to_check = heap->physical_blocks_allocators
-			[mem_block_iter]->num_blocks - ptr_bit_array_position;
+			[mem_block_iter]->info.num_blocks - ptr_bit_array_position;
 
 	/* Neeeeeeeds optimization - thinking how to do it properly
 	 * each set bit in order after another means one allocated block.