From 66557712bcd812ddfd254a49549cd262163a4384 Mon Sep 17 00:00:00 2001 From: Matteo Perotti Date: Tue, 3 Dec 2024 19:38:25 +0100 Subject: [PATCH] [cheshire/sw] Add RVV tests --- cheshire/sw/Makefile | 11 +- cheshire/sw/include/rvv_test.h | 348 ++++++++++++++++++ ..._mmu_stub_unit_stride_comprehensive.c.body | 236 ++++++++++++ ...u_stub_unit_stride_ld_comprehensive.c.body | 252 +++++++++++++ .../body/rvv_test_vstart_unit_stride.c.body | 159 ++++++++ ...art_unit_stride_mmu_stub_page_fault.c.body | 148 ++++++++ .../sw/src/tests/regfile_text_stub_regs.c | 68 ++++ cheshire/sw/src/tests/rvv_test_exceptions.c | 207 +++++++++++ ..._stride_comprehensive_page_fault_var_lat.c | 16 + ..._comprehensive_page_fault_var_lat_var_ex.c | 16 + ...u_stub_unit_stride_comprehensive_var_lat.c | 16 + ...v_test_mmu_stub_unit_stride_corner_cases.c | 277 ++++++++++++++ ...ride_ld_comprehensive_page_fault_var_lat.c | 16 + ..._comprehensive_page_fault_var_lat_var_ex.c | 16 + ...tub_unit_stride_ld_comprehensive_var_lat.c | 16 + .../rvv_test_mmu_stub_unit_stride_reshuffle.c | 237 ++++++++++++ ...v_test_mmu_stub_unit_stride_st_reshuffle.c | 221 +++++++++++ cheshire/sw/src/tests/rvv_test_vstart_csrs.c | 83 +++++ ..._unit_stride_mmu_stub_page_fault_var_lat.c | 15 + ..._mmu_stub_page_fault_var_lat_mmu_req_gen.c | 20 + ...test_vstart_unit_stride_mmu_stub_var_lat.c | 15 + 21 files changed, 2389 insertions(+), 4 deletions(-) create mode 100644 cheshire/sw/include/rvv_test.h create mode 100644 cheshire/sw/src/tests/body/rvv_test_mmu_stub_unit_stride_comprehensive.c.body create mode 100644 cheshire/sw/src/tests/body/rvv_test_mmu_stub_unit_stride_ld_comprehensive.c.body create mode 100644 cheshire/sw/src/tests/body/rvv_test_vstart_unit_stride.c.body create mode 100644 cheshire/sw/src/tests/body/rvv_test_vstart_unit_stride_mmu_stub_page_fault.c.body create mode 100644 cheshire/sw/src/tests/regfile_text_stub_regs.c create mode 100644 cheshire/sw/src/tests/rvv_test_exceptions.c create mode 100644 
cheshire/sw/src/tests/rvv_test_mmu_stub_unit_stride_comprehensive_page_fault_var_lat.c create mode 100644 cheshire/sw/src/tests/rvv_test_mmu_stub_unit_stride_comprehensive_page_fault_var_lat_var_ex.c create mode 100644 cheshire/sw/src/tests/rvv_test_mmu_stub_unit_stride_comprehensive_var_lat.c create mode 100644 cheshire/sw/src/tests/rvv_test_mmu_stub_unit_stride_corner_cases.c create mode 100644 cheshire/sw/src/tests/rvv_test_mmu_stub_unit_stride_ld_comprehensive_page_fault_var_lat.c create mode 100644 cheshire/sw/src/tests/rvv_test_mmu_stub_unit_stride_ld_comprehensive_page_fault_var_lat_var_ex.c create mode 100644 cheshire/sw/src/tests/rvv_test_mmu_stub_unit_stride_ld_comprehensive_var_lat.c create mode 100644 cheshire/sw/src/tests/rvv_test_mmu_stub_unit_stride_reshuffle.c create mode 100644 cheshire/sw/src/tests/rvv_test_mmu_stub_unit_stride_st_reshuffle.c create mode 100644 cheshire/sw/src/tests/rvv_test_vstart_csrs.c create mode 100644 cheshire/sw/src/tests/rvv_test_vstart_unit_stride_mmu_stub_page_fault_var_lat.c create mode 100644 cheshire/sw/src/tests/rvv_test_vstart_unit_stride_mmu_stub_page_fault_var_lat_mmu_req_gen.c create mode 100644 cheshire/sw/src/tests/rvv_test_vstart_unit_stride_mmu_stub_var_lat.c diff --git a/cheshire/sw/Makefile b/cheshire/sw/Makefile index 9f3a8d919..ebce6d746 100644 --- a/cheshire/sw/Makefile +++ b/cheshire/sw/Makefile @@ -12,9 +12,10 @@ CHS_SW := $(CHS_ROOT)/sw ARA_SW := $(ARA_ROOT)/cheshire/sw ARA_APPS := $(ARA_ROOT)/apps -APPS := $(patsubst $(ARA_APPS)/%/main.c,%,$(shell find $(ARA_APPS) -name "main.c")) -SW_C := $(wildcard $(ARA_SW)/src/*.c) -DEPS_H := $(wildcard $(ARA_SW)/include/*.h) +APPS := $(patsubst $(ARA_APPS)/%/main.c,%,$(shell find $(ARA_APPS) -name "main.c")) +SW_C := $(wildcard $(ARA_SW)/src/*.c) +TESTS_C := $(wildcard $(ARA_SW)/src/tests/*.c) $(wildcard $(ARA_SW)/src/tests/body/*.body) +DEPS_H := $(wildcard $(ARA_SW)/include/*.h) # Hardware configuration for the Ara RVV kernels # Can be chosen in 
[2|4|8|16]_lanes @@ -25,6 +26,8 @@ include $(ARA_ROOT)/config/$(ARA_CONFIGURATION).mk CHS_SW_FLAGS ?= $(shell grep "^CHS_SW_FLAGS\s\+?=\s\+" -- $(CHS_SW)/sw.mk | sed 's/^.*?= //' | sed s/rv64gc/rv64gcv/) # Tweak the compilation to include Cheshire-related headers and files CHS_SW_FLAGS += -DCHESHIRE -DNR_LANES=$(nr_lanes) -DVLEN=$(vlen) +# Include the correct definitions for the RVV tests +CHS_SW_FLAGS += -DARA_NR_LANES=$(nr_lanes) -DEEW=$(eew) -DPRINTF=$(printf) # Vars and rules to make the Linux image include cva6-sdk.mk @@ -40,5 +43,5 @@ copy-vector-deps: $(DEPS_H) cp $^ $(CHS_SW)/tests # Copy the vector programs from the src folder to cheshire -copy-vector-sw: $(SW_C) +copy-vector-sw: $(SW_C) $(TESTS_C) cp $^ $(CHS_SW)/tests diff --git a/cheshire/sw/include/rvv_test.h b/cheshire/sw/include/rvv_test.h new file mode 100644 index 000000000..ebc48d06a --- /dev/null +++ b/cheshire/sw/include/rvv_test.h @@ -0,0 +1,348 @@ +// Copyright 2023 ETH Zurich and University of Bologna. +// Licensed under the Apache License, Version 2.0, see LICENSE for details. 
+// SPDX-License-Identifier: Apache-2.0 +// +// Vincenzo Maisto +// Matteo Perotti + +#ifndef __RVV_RVV_TEST_H__ +#define __RVV_RVV_TEST_H__ + +#include "regs/cheshire.h" + +#if (PRINTF == 1) +#include "printf.h" +#endif + +///////////////// +// SEW and EEW // +///////////////// + +// Public defines +#if EEW == 64 +#define _DTYPE __DTYPE(64) +#define _VSETVLI(vl,avl) _VSETVLI_64(vl, avl) +#define _VLD(vreg,address_load) __VLD(vreg,64,address_load) +#define _VST(vreg,address_store) __VST(vreg,64,address_store) +#elif EEW == 32 +#define _DTYPE __DTYPE(32) +#define _VSETVLI(vl,avl) _VSETVLI_32(vl, avl) +#define _VLD(vreg,address_load) __VLD(vreg,32,address_load) +#define _VST(vreg,address_store) __VST(vreg,32,address_store) +#elif EEW == 16 +#define _DTYPE __DTYPE(16) +#define _VSETVLI(vl,avl) _VSETVLI_16(vl, avl) +#define _VLD(vreg,address_load) __VLD(vreg,16,address_load) +#define _VST(vreg,address_store) __VST(vreg,16,address_store) +#elif EEW == 8 +#define _DTYPE __DTYPE(8) +#define _VSETVLI(vl,avl) _VSETVLI_8(vl, avl) +#define _VLD(vreg,address_load) __VLD(vreg,8,address_load) +#define _VST(vreg,address_store) __VST(vreg,8,address_store) +#else +#error "ERROR: No EEW was defined. Please specify one in [8,16,32,64]." 
+#endif + +#define _VADD(vd,vs1,vs2) asm volatile ("vadd.vv "#vd", "#vs1", "#vs2""); + +// Private defines +#define __DTYPE(eew) uint##eew##_t +#define __VLD(vreg,eew,address_load) asm volatile ("vle"#eew".v "#vreg", (%0)" : "+&r"(address_load)); +#define __VST(vreg,eew,address_store) asm volatile ("vse"#eew".v "#vreg", (%0)" : "+&r"(address_store)); + +/////////////////////// +// Reshuffle helpers // +/////////////////////// + +#define VSETVLI(vl, avl, eew, lmul) { asm volatile("vsetvli %0, %1, e"#eew", m"#lmul", ta, ma \n\t" : "=r" (vl) :"r" (avl) ); } + +#define _VSETVLI_64(vl, avl) { VSETVLI(vl, avl, 64, 8); } +#define _VSETVLI_32(vl, avl) { VSETVLI(vl, avl, 32, 8); } +#define _VSETVLI_16(vl, avl) { VSETVLI(vl, avl, 16, 8); } +#define _VSETVLI_8(vl, avl) { VSETVLI(vl, avl, 8, 8); } + +////////////////// +// Return codes // +////////////////// + +#define RET_CODE_SUCCESS 0 +#define RET_CODE_FAIL -1 +#define RET_CODE_WRONG_CASE -2 + +/////////////////////// +// SoC-level regfile // +/////////////////////// + +#define INIT_RVV_TEST_SOC_REGFILE \ +volatile uint32_t *rf_stub_ex_en = reg32(&__base_regs, CHESHIRE_STUB_EX_EN_REG_OFFSET); \ +volatile uint32_t *rf_stub_no_ex_lat = reg32(&__base_regs, CHESHIRE_STUB_NO_EX_LAT_REG_OFFSET); \ +volatile uint32_t *rf_req_rsp_lat = reg32(&__base_regs, CHESHIRE_STUB_REQ_RSP_LAT_REG_OFFSET); \ +volatile uint32_t *rf_virt_mem_en = reg32(&__base_regs, CHESHIRE_ARA_VIRT_MEM_EN_REG_OFFSET); \ +volatile uint32_t *rf_rvv_debug_reg = reg32(&__base_regs, CHESHIRE_RVV_DEBUG_REG_REG_OFFSET); \ +volatile uint32_t *rf_mmu_req_gen_en = reg32(&__base_regs, CHESHIRE_MMU_REQ_GEN_EN_REG_OFFSET); \ +volatile uint32_t *rf_mmu_req_gen_lat = reg32(&__base_regs, CHESHIRE_MMU_REQ_GEN_LAT_REG_OFFSET); + +////////////////////// +// Print facilities // +////////////////////// + +#define PRINT_INIT \ + uint32_t rtc_freq = *reg32(&__base_regs, CHESHIRE_RTC_FREQ_REG_OFFSET); \ + uint64_t reset_freq = clint_get_core_freq(rtc_freq, 2500); \ + 
uart_init(&__base_uart, reset_freq, 115200); \ + char uart_print_str[] = {'0', '\r', '\n'}; +#define PRINT_CHAR(byte) \ + uart_print_str[0] = (char) byte; \ + PRINT(uart_print_str) +#define PRINT(str) \ + uart_write_str(&__base_uart, str, sizeof(str)); \ + uart_write_flush(&__base_uart); + +///////////////////// +// Stub management // +///////////////////// + +// Enable virtual memory Ara->STUB requests +#define VIRTUAL_MEMORY(val) *rf_virt_mem_en = val; +#define VIRTUAL_MEMORY_ON *rf_virt_mem_en = 1; +#define VIRTUAL_MEMORY_OFF *rf_virt_mem_en = 0; +// Enable/disable exceptions from the stub. This registers also resets the status of the stub +// for what conerns the exceptions (e.g., the counter for the no-ex-latency). +#define STUB_EX(val) *rf_stub_ex_en = val; +#define STUB_EX_ON *rf_stub_ex_en = 1; +#define STUB_EX_OFF *rf_stub_ex_en = 0; +// Stub req-2-resp latency +#define STUB_REQ_RSP_LAT(lat) *rf_req_rsp_lat = lat; +// Exception latency (per transaction) +#define STUB_NO_EX_LAT(lat) *rf_stub_no_ex_lat = lat; +// Enable MMU req gen +#define MMU_REQ_GEN_EN(val) *rf_mmu_req_gen_en = val; +// MMU req gen ans-to-req latency +#define MMU_REQ_GEN_LAT(lat) *rf_mmu_req_gen_lat = lat; +// Reset SoC-CSRs +#define RESET_SOC_CSR *rf_virt_mem_en = 0; \ + *rf_stub_ex_en = 0; \ + *rf_req_rsp_lat = 0; \ + *rf_stub_no_ex_lat = 0; \ + *rf_mmu_req_gen_en = 0; \ + *rf_mmu_req_gen_lat = 0; \ + *rf_rvv_debug_reg = 0; + +/////////////// +// RVV Tests // +/////////////// + +#if (PRINTF == 1) +#define FAIL { printf("FAIL. 
retval: %lu\n", ret_cnt + 1); return ret_cnt + 1; } +#else +#define FAIL { return ret_cnt + 1; } +#endif + +#define ASSERT_EQ(var, gold) if (var != gold) FAIL +#define ASSERT_TRUE(val) { if (!val) FAIL }; +#define ASSERT_FALSE(val) { if ( val) FAIL }; + +// Helper test macros +#define RVV_TEST_INIT(vl, avl) vl = reset_v_state ( avl ); exception = 0; +#define RVV_TEST_CLEANUP() RVV_TEST_ASSERT_EXCEPTION(0); exception = 0; +// BUG: Can't return a non-zero value from here... +// #define RVV_TEST_ASSERT( expression ) if ( !expression ) { return -1; } +// Quick workaround: +#define RVV_TEST_ASSERT( expression ) if ( !(expression) ) FAIL +#define RVV_TEST_ASSERT_EXCEPTION( val ) RVV_TEST_ASSERT ( exception == (uint64_t)(val) ); +#define RVV_TEST_ASSERT_EXCEPTION_EXTENDED( valid, tval, cause ) RVV_TEST_ASSERT ( ( exception == (uint64_t)(valid) ) \ + & ( mtval == (uint64_t)(tval) ) \ + & ( mcause == (uint64_t)(cause) ) \ + ); +#define RVV_TEST_CLEAN_EXCEPTION() exception = 0; mtval = 0; mcause = 0; + +#define VLMAX (1024 * ARA_NR_LANES) +#define VLBMAX VLMAX +#define ELMMAX (VLMAX / (EEW / 8)) +#ifndef RVV_TEST_AVL + #define RVV_TEST_AVL(EEW) (VLMAX / (EEW)) +#endif + +// Helper test variables +typedef uint64_t vcsr_dump_t [5]; +uint64_t exception; +uint64_t mtval; +uint64_t mcause; +uint64_t magic_out; +// Return counter to ease debug +uint64_t ret_cnt; + +void enable_rvv() { + // Enable RVV by setting MSTATUS.VS + asm volatile (" li t0, %0 " :: "i"(MSTATUS_VS)); + asm volatile (" csrs mstatus, t0" ); +} + +uint64_t reset_v_state ( uint64_t avl ) { + uint64_t vl_local = 0; + + asm volatile ( + "fence \n\t" +#if EEW == 64 + "vsetvli %0, %1, e64, m8, ta, ma \n\t" +#elif EEW == 32 + "vsetvli %0, %1, e32, m8, ta, ma \n\t" +#elif EEW == 16 + "vsetvli %0, %1, e16, m8, ta, ma \n\t" +#elif EEW == 8 + "vsetvli %0, %1, e8, m8, ta, ma \n\t" +#endif + "csrw vstart, 0 \n\t" + "csrw vcsr , 0 \n\t" + "fence \n\t" + : "=r" (vl_local) : "r" (avl) : + ); + + return vl_local; +} + +void 
vcsr_dump ( vcsr_dump_t vcsr_state ) { + asm volatile ( + "csrr %0, vstart \n\t" + "csrr %1, vtype \n\t" + "csrr %2, vl \n\t" + "csrr %3, vcsr \n\t" + "csrr %4, vlenb \n\t" + : "=r" (vcsr_state[0]), + "=r" (vcsr_state[1]), + "=r" (vcsr_state[2]), + "=r" (vcsr_state[3]), + "=r" (vcsr_state[4]) + ); +} + +// Override default weak trap vector +void trap_vector () { + // Set exception flag + exception = 1; + + // Save tval and mcause + mtval = 0; + mcause = 0; + asm volatile ("csrr %0, mtval" : "=r"(mtval)); + asm volatile ("csrr %0, mcause" : "=r"(mcause)); + + // Move PC ahead + // NOTE: PC = PC + 4, valid only for non-compressed trapping instructions + asm volatile ( + "nop;" + "csrr t6, mepc;" + "addi t6, t6, 4; # PC = PC + 4, valid only for non-compressed trapping instructions\n" + "csrw mepc, t6;" + "nop;" + ); +} + +#define INIT_NONZERO_VAL_ST 37 +#define MAGIC_NUM 5 + +// Maximum STUB req-rsp latency (power of 2 to speed up the code) +#define MAX_LAT_P2 8 + +#define MEM_BUS_BYTE 4 * ARA_NR_LANES + +// Helper +#define LOG2_4Ki 12 +// Max number of bursts in a single AXI unit-stride memory op +// 16 lanes, 16 KiB vector register (LMUL == 8) +// MAX 256 beats in a burst (BUS_WIDTH_min == 8B): 16KiB / (256 * 8B) = 8 +// No 4KiB page crossings: max bursts -> 16KiB / 4KiB + 1 = 5 +// Use a safe value higher than the previous bounds +#define MAX_BURSTS 16 + +typedef struct axi_burst_log_s { + // Number of bursts in this AXI transaction + uint64_t bursts; + // Number of vector elemetns per AXI burst + uint64_t burst_vec_elm[MAX_BURSTS]; + // Start address of each AXI burst + uint64_t burst_start_addr[MAX_BURSTS]; +} axi_burst_log_t; +axi_burst_log_t axi_log; + +// Get the number of elements correctly processed before the exception at burst T in [0,N_BURSTS-1]. 
+uint64_t get_body_elm_pre_exception(axi_burst_log_t *axi_log, uint64_t T) { + // Calculate how many elements before exception + uint64_t elm = 0; + for (int i = 0; i < T; i++) { + elm += axi_log->burst_vec_elm[i]; + } + return elm; +} + +// Get the number of bursts per vector unit-stride memory operation from an address and a number of elements +// with 2^(enc_ew) Byte each, and a memory bus of 2^(log2_balign) Byte. +axi_burst_log_t* get_unit_stride_bursts(axi_burst_log_t *axi_log, uint64_t addr, uint64_t vl_eff, uint64_t enc_ew, uint64_t log2_balign) { + // Requests are aligned to the memory bus + uint64_t aligned_addr = (addr >> log2_balign) << log2_balign; + + // Calculate the number of elements per burst + uint64_t start_addr_misaligned = addr; + uint64_t start_addr = aligned_addr; + uint64_t final_addr = start_addr_misaligned + (vl_eff << enc_ew); + uint64_t end_addr; + axi_log->bursts = 0; + while (start_addr < final_addr) { + // Find the end address (minimum address among the various limits) + // Burst cannot be made of more than 256 beats + uint64_t end_addr_lim_0 = start_addr + (256 << log2_balign); + // Burst cannot cross 4KiB pages + uint64_t end_addr_lim_1 = ((start_addr >> LOG2_4Ki) << LOG2_4Ki) + (1 << LOG2_4Ki); + // The end address is finally limited by the vector length + uint64_t end_addr_lim_2 = start_addr_misaligned + (vl_eff << enc_ew); + // Find the minimum end address (<= so that ties between limits still pick the minimum) + if (end_addr_lim_0 <= end_addr_lim_1 && end_addr_lim_0 <= end_addr_lim_2) { + end_addr = end_addr_lim_0; + } else if (end_addr_lim_1 <= end_addr_lim_0 && end_addr_lim_1 <= end_addr_lim_2) { + end_addr = end_addr_lim_1; + } else { + end_addr = end_addr_lim_2; + } + + // How many elements in this burst + uint64_t elm_per_burst = (end_addr - start_addr_misaligned) >> enc_ew; + vl_eff -= elm_per_burst; + // Log burst info + axi_log->burst_vec_elm[axi_log->bursts] = elm_per_burst; + axi_log->burst_start_addr[axi_log->bursts++] = start_addr_misaligned; + + // Find next start address + 
start_addr = end_addr; + // After the first burst, the address is always aligned with the bus width + start_addr_misaligned = start_addr; + } + + return axi_log; +} + +// Get the number of bursts per vector unit-stride AXI memory operation and the number of elements per burst. +// This function calculates the effective vl and address from vl, addr, and vstart, some other helpers, +// and then falls through to the real function. +void get_unit_stride_bursts_wrap(axi_burst_log_t *axi_log, uint64_t addr, uint64_t vl, uint64_t ew, uint64_t mem_bus_byte, uint64_t vstart, uint8_t is_store) { + // Encode ew [bits] in a [byte] exponent + uint64_t enc_ew = (31 - __builtin_clz(ew)) - 3; + // Is this memory operation misaligned? + uint64_t is_misaligned = addr & (mem_bus_byte - 1); + // Calculate the effective memory bus width. Misaligned or vstart>0 stores get a reduced BW. + uint64_t eff_mem_bus_byte = (is_store && (is_misaligned || (vstart > 0))) ? (ew >> 3) : mem_bus_byte; + // Find log2 byte alignment + uint64_t log2_balign = (31 - __builtin_clz(eff_mem_bus_byte)); + // Effective starting address + uint64_t eff_addr = addr + (vstart << enc_ew); + uint64_t eff_vl = vl - vstart; + + get_unit_stride_bursts(axi_log, eff_addr, eff_vl, enc_ew, log2_balign); +} + +// Quick pseudo-rand from 0 to max +uint64_t pseudo_rand(uint64_t max) { + static uint64_t x = 0; + return (x = (x + 7) % (max + 1)); +} + +#endif // __RVV_RVV_TEST_H__ diff --git a/cheshire/sw/src/tests/body/rvv_test_mmu_stub_unit_stride_comprehensive.c.body b/cheshire/sw/src/tests/body/rvv_test_mmu_stub_unit_stride_comprehensive.c.body new file mode 100644 index 000000000..8d5fa0d39 --- /dev/null +++ b/cheshire/sw/src/tests/body/rvv_test_mmu_stub_unit_stride_comprehensive.c.body @@ -0,0 +1,236 @@ +// Copyright 2023 ETH Zurich and University of Bologna. +// Licensed under the Apache License, Version 2.0, see LICENSE for details. 
+// SPDX-License-Identifier: Apache-2.0 +// +// Matteo Perotti +// Vincenzo Maisto + +#include "regs/cheshire.h" +#include "dif/clint.h" +#include "dif/uart.h" +#include "params.h" +#include "util.h" +#include "encoding.h" +#include "rvv_test.h" + +#include "cheshire_util.h" + +#if (EXTENSIVE_TEST == 1) +#define VL_LIMIT_LOW ELMMAX + 1 +#define VL_LIMIT_HIGH 0 +#define VSTART_LIMIT_LOW vl + 1 +#define VSTART_LIMIT_HIGH 0 +#else +#define VL_LIMIT_LOW 3*ARA_NR_LANES + 1 +#define VL_LIMIT_HIGH ELMMAX - (3*ARA_NR_LANES + 1) +#define VSTART_LIMIT_LOW 2*ARA_NR_LANES + 1 +#define VSTART_LIMIT_HIGH vl - 2*ARA_NR_LANES - 1 +#endif + +// Derived parameters +#define param_stub_ex { param_stub_ex_ctrl ? 1 : 0; } + +uint64_t stub_req_rsp_lat = param_stub_req_rsp_lat; + +int main(void) { + cheshire_start(); + + // Clean the exception variable + RVV_TEST_CLEAN_EXCEPTION(); + + // This initialization is controlled through "defines" in the various + // derived tests. + INIT_RVV_TEST_SOC_REGFILE; + VIRTUAL_MEMORY_ON; + STUB_EX_ON; + + // Vector configuration parameters and variables + uint64_t avl_original = RVV_TEST_AVL(64); + uint64_t vl, vstart_read; + vcsr_dump_t vcsr_state = {0}; + + // Helper variables and arrays + _DTYPE array_load [ELMMAX]; + _DTYPE array_store [ELMMAX]; + _DTYPE* address_load = array_load; + _DTYPE* address_store = array_store; + + // Enalbe RVV + enable_rvv(); + vcsr_dump ( vcsr_state ); + + ////////////////////////////////////////////////////////////////// + ////////////////////////////////////////////////////////////////// + // START OF TESTS + ////////////////////////////////////////////////////////////////// + ////////////////////////////////////////////////////////////////// + + ////////////////////////////////////////////////////////////////// + // TEST: Exception generation and non-zero vstart: vector store + ////////////////////////////////////////////////////////////////// + + // Loop through different avl + for (uint64_t avl = 1; (avl <= 
VL_LIMIT_LOW || avl >= VL_LIMIT_HIGH) && avl <= ELMMAX + 1; avl++) { + // Reset vl, vstart, reset exceptions. + RVV_TEST_INIT(vl, avl); + + // Loop over vstart values. + for (uint64_t vstart_val = 0; (vstart_val <= VSTART_LIMIT_LOW || vstart_val >= VSTART_LIMIT_HIGH) && vstart_val < vl; vstart_val++) { + + // Reset vl, vstart, reset exceptions. + RVV_TEST_INIT(vl, avl); + // Turn off exceptions + STUB_EX_OFF; + + // Decide latency for next STUB req-rsp + switch (param_stub_req_rsp_lat_ctrl) { + // Fixed STUB req-rsp latency + case 0: + STUB_REQ_RSP_LAT(stub_req_rsp_lat); + break; + // Random STUB req-rsp latency (minimum value should be 1) + case 1: + STUB_REQ_RSP_LAT((stub_req_rsp_lat++ % MAX_LAT_P2) + 1); + break; + default: + cheshire_end(); + return RET_CODE_WRONG_CASE; + } + + // Init memory + for (uint64_t i = 0; i < vl; i++) { + address_store[i] = INIT_NONZERO_VAL_ST; + } + for (uint64_t i = 0; i < vl; i++) { + address_load[i] = vl + vstart_val + i + MAGIC_NUM; + } + + // Get information about the next axi transfer + get_unit_stride_bursts_wrap(&axi_log, address_store, vl, EEW, MEM_BUS_BYTE, vstart_val, 1); + + // Load the whole register + _VLD(v0, address_load) + // Setup vstart + asm volatile("csrs vstart, %0" :: "r"(vstart_val)); + + // Setup STUB behavior + uint64_t ex_lat; + switch (param_stub_ex_ctrl) { + // No exceptions + case 0: + ex_lat = 0; + STUB_EX_OFF; + break; + // Always exceptions at every request + case 1: + ex_lat = 0; + STUB_EX_ON; + STUB_NO_EX_LAT(ex_lat); + break; + // Random exceptions + case 2: + // If ex_lat == axi_log.bursts, no exception for this transaction! 
+ ex_lat = pseudo_rand(axi_log.bursts); + STUB_EX_ON; + STUB_NO_EX_LAT(ex_lat); + break; + default: + cheshire_end(); + return RET_CODE_WRONG_CASE; + } + + // Get information about the next vstart + uint64_t body_elm_pre_exception = get_body_elm_pre_exception(&axi_log, ex_lat); + uint64_t vstart_post_ex = vstart_val + body_elm_pre_exception; + + // Check for illegal new vstart values + RVV_TEST_ASSERT(vstart_post_ex >= vstart_val && (vstart_post_ex < vl || (param_stub_ex_ctrl == 2 && vstart_post_ex == vl))) + + // Store back the values + _VST(v0, address_store) + + // Check pre-start values + for (uint64_t i = 0; i < vstart_val; i++) { + ASSERT_EQ(address_store[i], INIT_NONZERO_VAL_ST) + } + + // Check if we had an exception on this transaction + if (param_stub_ex_ctrl == 1 || (param_stub_ex_ctrl == 2 && ex_lat < axi_log.bursts)) { + // Check that the body, up to the exception, has the correct value + for (uint64_t i = vstart_val; i < vstart_post_ex; i++) { + ASSERT_EQ(address_store[i], address_load[i]) + } + + // Check that the body, after the exception, was untouched in memory + for (uint64_t i = vstart_post_ex; i < vl; i++) { + ASSERT_EQ(address_store[i], INIT_NONZERO_VAL_ST) + } + + // Check that the new vstart is correct + vstart_read = -1; + asm volatile("csrr %0, vstart" : "=r"(vstart_read)); + ASSERT_EQ(vstart_read, vstart_post_ex) + // Check the exception + RVV_TEST_ASSERT_EXCEPTION_EXTENDED(1, axi_log.burst_start_addr[ex_lat], CAUSE_STORE_PAGE_FAULT) + RVV_TEST_CLEAN_EXCEPTION() + + // Recover the instruction + // The following instructions resets the STUB counter for the exceptions, too! 
+ STUB_EX_OFF; + _VST(v0, address_store) + STUB_EX_ON; + // Check pre-start values again + for (uint64_t i = 0; i < vstart_val; i++) { + ASSERT_EQ(address_store[i], INIT_NONZERO_VAL_ST) + } + } + + // No exception (or just-recovered-from-an-exception) area + // Check that the body was correctly stored + for (uint64_t i = vstart_val; i < vl; i++) { + ASSERT_EQ(address_store[i], address_load[i]) + } + + // Check that vstart was reset at zero + vstart_read = -1; + + asm volatile("csrr %0, vstart" : "=r"(vstart_read)); + ASSERT_EQ(vstart_read, 0) + // Check that there was no exception + RVV_TEST_ASSERT_EXCEPTION(0) + RVV_TEST_CLEAN_EXCEPTION() + + // Clean-up + RVV_TEST_CLEANUP(); + + // Jump from limit low to limit high if limit high is higher than low + if ((VSTART_LIMIT_LOW) < (VSTART_LIMIT_HIGH)) + if (vstart_val == VSTART_LIMIT_LOW) + vstart_val = VSTART_LIMIT_HIGH; + + ret_cnt++; + } + // Jump from limit low to limit high if limit high is higher than low + if ((VL_LIMIT_LOW) < (VL_LIMIT_HIGH)) + if (avl == VL_LIMIT_LOW) + avl = VL_LIMIT_HIGH; + } + + // Clean-up the SoC CSRs + RESET_SOC_CSR; + + ////////////////////////////////////////////////////////////////// + ////////////////////////////////////////////////////////////////// + // END OF TESTS + ////////////////////////////////////////////////////////////////// + ////////////////////////////////////////////////////////////////// + +#if (PRINTF == 1) + printf("Test SUCCESS!\r\n"); +#endif + + cheshire_end(); + + // If we did not return before, the test passed + return RET_CODE_SUCCESS; +} diff --git a/cheshire/sw/src/tests/body/rvv_test_mmu_stub_unit_stride_ld_comprehensive.c.body b/cheshire/sw/src/tests/body/rvv_test_mmu_stub_unit_stride_ld_comprehensive.c.body new file mode 100644 index 000000000..e141c82be --- /dev/null +++ b/cheshire/sw/src/tests/body/rvv_test_mmu_stub_unit_stride_ld_comprehensive.c.body @@ -0,0 +1,252 @@ +// Copyright 2023 ETH Zurich and University of Bologna. 
+// Licensed under the Apache License, Version 2.0, see LICENSE for details. +// SPDX-License-Identifier: Apache-2.0 +// +// Matteo Perotti +// Vincenzo Maisto + +#include "regs/cheshire.h" +#include "dif/clint.h" +#include "dif/uart.h" +#include "params.h" +#include "util.h" +#include "encoding.h" +#include "rvv_test.h" + +#include "cheshire_util.h" + +#if (EXTENSIVE_TEST == 1) +#define VL_LIMIT_LOW ELMMAX + 1 +#define VL_LIMIT_HIGH 0 +#define VSTART_LIMIT_LOW vl + 1 +#define VSTART_LIMIT_HIGH 0 +#else +#define VL_LIMIT_LOW 3*ARA_NR_LANES + 1 +#define VL_LIMIT_HIGH ELMMAX - (3*ARA_NR_LANES + 1) +#define VSTART_LIMIT_LOW 2*ARA_NR_LANES + 1 +#define VSTART_LIMIT_HIGH vl - 2*ARA_NR_LANES - 1 +#endif + +#define INIT_NONZERO_VAL_V0 99 +#define INIT_NONZERO_VAL_V8 67 + +// Derived parameters +#define param_stub_ex { param_stub_ex_ctrl ? 1 : 0; } + +uint64_t stub_req_rsp_lat = param_stub_req_rsp_lat; + +int main(void) { + cheshire_start(); + + // Clean the exception variable + RVV_TEST_CLEAN_EXCEPTION(); + + // This initialization is controlled through "defines" in the various + // derived tests. 
+ INIT_RVV_TEST_SOC_REGFILE; + VIRTUAL_MEMORY_ON; + STUB_EX_ON; + + // Vector configuration parameters and variables + uint64_t avl_original = RVV_TEST_AVL(64); + uint64_t vl, vstart_read; + vcsr_dump_t vcsr_state = {0}; + + // Helper variables and arrays + _DTYPE array_load [ELMMAX]; + _DTYPE array_store_0 [ELMMAX]; + _DTYPE array_store_1 [ELMMAX]; + _DTYPE* address_load = array_load; + _DTYPE* address_store_0 = array_store_0; + _DTYPE* address_store_1 = array_store_1; + + // Enalbe RVV + enable_rvv(); + vcsr_dump ( vcsr_state ); + + ////////////////////////////////////////////////////////////////// + ////////////////////////////////////////////////////////////////// + // START OF TESTS + ////////////////////////////////////////////////////////////////// + ////////////////////////////////////////////////////////////////// + + ////////////////////////////////////////////////////////////////// + // TEST: Exception generation and non-zero vstart: vector store + ////////////////////////////////////////////////////////////////// + + // Loop through different avl, from 0 to avlmax + for (uint64_t avl = 1; (avl <= VL_LIMIT_LOW || avl >= VL_LIMIT_HIGH) && avl <= ELMMAX + 1; avl++) { + // Reset vl, vstart, reset exceptions. + RVV_TEST_INIT(vl, avl); + + // Loop over vstart values. Also test vstart > vl. + for (uint64_t vstart_val = 0; (vstart_val <= VSTART_LIMIT_LOW || vstart_val >= VSTART_LIMIT_HIGH) && vstart_val < vl; vstart_val++) { + // Reset vl, vstart, reset exceptions. 
+ RVV_TEST_INIT(vl, avl); + + // Decide latency for next STUB req-rsp + switch (param_stub_req_rsp_lat_ctrl) { + // Fixed STUB req-rsp latency + case 0: + STUB_REQ_RSP_LAT(stub_req_rsp_lat); + break; + // Random STUB req-rsp latency (minimum value should be 1) + case 1: + STUB_REQ_RSP_LAT((stub_req_rsp_lat++ % MAX_LAT_P2) + 1); + break; + default: + cheshire_end(); + return RET_CODE_WRONG_CASE; + } + + // Init memory + for (uint64_t i = 0; i < vl; i++) { + address_store_0[i] = INIT_NONZERO_VAL_ST; + address_store_1[i] = INIT_NONZERO_VAL_ST; + } + for (uint64_t i = 0; i < vl; i++) { + address_load[i] = vl + vstart_val + i + MAGIC_NUM; + } + // Init VRF (use v0) + asm volatile("vmv.v.x v0, %0" :: "r" (INIT_NONZERO_VAL_V0)); + asm volatile("vmv.v.x v8, %0" :: "r" (INIT_NONZERO_VAL_V8)); + + // Get information about the next axi transfer + get_unit_stride_bursts_wrap(&axi_log, address_load, vl, EEW, MEM_BUS_BYTE, vstart_val, 0); + + // Setup STUB behavior + uint64_t ex_lat; + switch (param_stub_ex_ctrl) { + // No exceptions + case 0: + ex_lat = axi_log.bursts; + STUB_EX_OFF; + break; + // Always exceptions at every request + case 1: + ex_lat = 0; + STUB_EX_ON; + STUB_NO_EX_LAT(ex_lat); + break; + // Random exceptions + case 2: + // If ex_lat == axi_log->bursts, no exception for this transaction! 
+ ex_lat = pseudo_rand(axi_log.bursts); + STUB_EX_ON; + STUB_NO_EX_LAT(ex_lat); + break; + default: + cheshire_end(); + return RET_CODE_WRONG_CASE; + } + + // Setup vstart + asm volatile("csrs vstart, %0" :: "r"(vstart_val)); + + // Load the whole register + _VLD(v0, address_load) + + // Get information about the next vstart + uint64_t body_elm_pre_exception = get_body_elm_pre_exception(&axi_log, ex_lat); + uint64_t vstart_post_ex = vstart_val + body_elm_pre_exception; + + // Check for illegal new vstart values + RVV_TEST_ASSERT(vstart_post_ex >= vstart_val && (vstart_post_ex < vl || (ex_lat == axi_log.bursts && vstart_post_ex == vl))) + + // Check if we had an exception on this transaction + if (param_stub_ex_ctrl == 1 || (param_stub_ex_ctrl == 2 && ex_lat < axi_log.bursts)) { + // Check that the new vstart is correct + vstart_read = -1; + asm volatile("csrr %0, vstart" : "=r"(vstart_read)); + ASSERT_EQ(vstart_read, vstart_post_ex) + // Check the exception + RVV_TEST_ASSERT_EXCEPTION_EXTENDED(1, axi_log.burst_start_addr[ex_lat], CAUSE_LOAD_PAGE_FAULT) + RVV_TEST_CLEAN_EXCEPTION() + + // Restart the instruction on another reg, or just load everything in v8 too. 
+ // Then, store everything from v8 + STUB_EX_OFF; + _VLD(v8, address_load) + _VST(v8, address_store_1) + STUB_EX_ON; + + // Pre-body check v8 + for (uint64_t i = 0; i < vstart_val; i++) { + ASSERT_EQ(address_store_1[i], INIT_NONZERO_VAL_V8) + } + + // Body check 0 + for (uint64_t i = vstart_val; i < vstart_post_ex; i++) { + ASSERT_EQ(address_store_1[i], INIT_NONZERO_VAL_V8) + } + + // Body check 1 + for (uint64_t i = vstart_post_ex; i < vl; i++) { + ASSERT_EQ(address_store_1[i], address_load[i]) + } + } + + // Check that vstart was reset at zero + vstart_read = -1; + + asm volatile("csrr %0, vstart" : "=r"(vstart_read)); + ASSERT_EQ(vstart_read, 0) + // Check that there was no exception + RVV_TEST_ASSERT_EXCEPTION(0) + RVV_TEST_CLEAN_EXCEPTION() + + // Store back the values of v0 + STUB_EX_OFF; + _VST(v0, address_store_0) + STUB_EX_ON; + + // Pre-body check v0 + for (uint64_t i = 0; i < vstart_val; i++) { + ASSERT_EQ(address_store_0[i], INIT_NONZERO_VAL_V0) + } + + // Body check 0 + for (uint64_t i = vstart_val; i < vstart_post_ex; i++) { + ASSERT_EQ(address_store_0[i], address_load[i]) + } + + // Body check 1 + for (uint64_t i = vstart_post_ex; i < vl; i++) { + ASSERT_EQ(address_store_0[i], INIT_NONZERO_VAL_V0) + } + + // Clean-up + RVV_TEST_CLEANUP(); + + // Jump from limit low to limit high if limit high is higher than low + if ((VSTART_LIMIT_LOW) < (VSTART_LIMIT_HIGH)) + if (vstart_val == VSTART_LIMIT_LOW) + vstart_val = VSTART_LIMIT_HIGH; + + ret_cnt++; + } + + // Jump from limit low to limit high if limit high is higher than low + if ((VL_LIMIT_LOW) < (VL_LIMIT_HIGH)) + if (avl == VL_LIMIT_LOW) + avl = VL_LIMIT_HIGH; + } + + // Clean-up the SoC CSRs + RESET_SOC_CSR; + + ////////////////////////////////////////////////////////////////// + ////////////////////////////////////////////////////////////////// + // END OF TESTS + ////////////////////////////////////////////////////////////////// + ////////////////////////////////////////////////////////////////// + 
+#if (PRINTF == 1) + printf("Test SUCCESS!\r\n"); +#endif + + cheshire_end(); + + // If we did not return before, the test passed + return RET_CODE_SUCCESS; +} diff --git a/cheshire/sw/src/tests/body/rvv_test_vstart_unit_stride.c.body b/cheshire/sw/src/tests/body/rvv_test_vstart_unit_stride.c.body new file mode 100644 index 000000000..539fa135c --- /dev/null +++ b/cheshire/sw/src/tests/body/rvv_test_vstart_unit_stride.c.body @@ -0,0 +1,159 @@ +// Copyright 2023 ETH Zurich and University of Bologna. +// Licensed under the Apache License, Version 2.0, see LICENSE for details. +// SPDX-License-Identifier: Apache-2.0 +// +// Vincenzo Maisto +// Matteo Perotti + +#include "regs/cheshire.h" +#include "dif/clint.h" +#include "dif/uart.h" +#include "params.h" +#include "util.h" +#include "encoding.h" +#include "rvv_test.h" + +#include "cheshire_util.h" + +int main(void) { + cheshire_start(); + + // Clean the exception variable + RVV_TEST_CLEAN_EXCEPTION(); + + // Vector configuration parameters and variables + uint64_t avl = RVV_TEST_AVL(64); + uint64_t vl; + vcsr_dump_t vcsr_state = {0}; + + // Helper variables and arrays + _DTYPE array_load [RVV_TEST_AVL(64)]; + _DTYPE array_store [RVV_TEST_AVL(64)]; + _DTYPE* address_load = array_load; + _DTYPE* address_store = array_store; + _DTYPE* address_misaligned; + uint8_t byte; + uint64_t vstart_val; + _DTYPE store_val, preload_val; + + // Enalbe RVV + enable_rvv(); + vcsr_dump ( vcsr_state ); + + ///////////////// + // STUB CONFIG // + ///////////////// + + // This initialization is controlled through "defines" in the various + // derived tests. 
+ INIT_RVV_TEST_SOC_REGFILE; + VIRTUAL_MEMORY(param_stub_virt_mem); + STUB_EX_OFF; + STUB_REQ_RSP_LAT(param_stub_req_rsp_lat); + + ////////////////////////////////////////////////////////////////// + ////////////////////////////////////////////////////////////////// + // START OF TESTS + ////////////////////////////////////////////////////////////////// + ////////////////////////////////////////////////////////////////// + + ////////////////////////////////////////////////////////////////// + // TEST: Zero and non-zero vstart loads + ////////////////////////////////////////////////////////////////// + RVV_TEST_INIT( vl, avl ); + + // Loop over vstart values + for ( uint64_t vstart_val = 0; vstart_val < vl; vstart_val++ ) { + RVV_TEST_INIT( vl, avl ) + + if (param_stub_req_rsp_lat_ctrl) { + // Random STUB req-rsp latency (minimum value should be 1) + STUB_REQ_RSP_LAT((vstart_val % param_stub_req_rsp_lat) + 1); + } + + // Init memory + for ( uint64_t i = 0; i < vstart_val; i++ ) { + address_load[i] = i; + } + for ( uint64_t i = vstart_val; i < vl; i++ ) { + address_load[i] = 0; + } + // Init VRF with prestart + _VLD(v0,address_load) + + // Init memory + for ( uint64_t i = vstart_val; i < vl; i++ ) { + address_load[i] = vstart_val + vl + i; + } + // Set vstart + asm volatile ("csrs vstart, %0" :: "r"(vstart_val) ); + // Test target: load vr group body + _VLD(v0,address_load) + // Store whole vr group + _VST(v0,address_store) + + // Check pre-start + for ( uint64_t i = 0; i < vstart_val; i++ ) { + RVV_TEST_ASSERT ( address_store[i] == i ); + } + // Check body + for ( uint64_t i = vstart_val; i < vl; i++ ) { + RVV_TEST_ASSERT ( address_store[i] == address_load[i] ); + } + + RVV_TEST_CLEANUP(); + ret_cnt++; + } + + ////////////////////////////////////////////////////////////////// + // TEST: Zero and non-zero vstart unit-stride stores + ////////////////////////////////////////////////////////////////// + RVV_TEST_INIT( vl, avl ); + + // Loop over vstart values + for ( 
uint64_t vstart_val = 0; vstart_val < vl; vstart_val++ ) { + RVV_TEST_INIT( vl, avl ); + + if (param_stub_req_rsp_lat_ctrl) { + // Random STUB req-rsp latency (minimum value should be 1) + STUB_REQ_RSP_LAT((vstart_val % param_stub_req_rsp_lat) + 1); + } + + store_val = vl; + + // Init memory + for ( uint64_t i = 0; i < vl; i++ ) { + address_store[i] = vstart_val + i; + } + for ( uint64_t i = 0; i < vl; i++ ) { + address_load[i] = vstart_val + store_val + i; + } + + _VLD(v24,address_load) + asm volatile ("csrs vstart, %0" :: "r"(vstart_val) ); + _VST(v24,address_store) + + // Check pre-start + for ( uint64_t i = 0; i < vstart_val; i++ ) { + RVV_TEST_ASSERT ( address_store[i] == vstart_val + i ); + } + // Check body + for ( uint64_t i = vstart_val; i < vl; i++ ) { + RVV_TEST_ASSERT ( address_store[i] == address_load[i] ); + } + + RVV_TEST_CLEANUP(); + ret_cnt++; + } + + // Clean-up the SoC CSRs + RESET_SOC_CSR; + +#if (PRINTF == 1) + printf("Test SUCCESS!\r\n"); +#endif + + cheshire_end(); + + return 0; +} diff --git a/cheshire/sw/src/tests/body/rvv_test_vstart_unit_stride_mmu_stub_page_fault.c.body b/cheshire/sw/src/tests/body/rvv_test_vstart_unit_stride_mmu_stub_page_fault.c.body new file mode 100644 index 000000000..d5c591745 --- /dev/null +++ b/cheshire/sw/src/tests/body/rvv_test_vstart_unit_stride_mmu_stub_page_fault.c.body @@ -0,0 +1,148 @@ +// Copyright 2023 ETH Zurich and University of Bologna. +// Licensed under the Apache License, Version 2.0, see LICENSE for details. 
+// SPDX-License-Identifier: Apache-2.0 +// +// Vincenzo Maisto +// Matteo Perotti + +#include "regs/cheshire.h" +#include "dif/clint.h" +#include "dif/uart.h" +#include "params.h" +#include "util.h" +#include "encoding.h" +#include "rvv_test.h" + +#include "cheshire_util.h" + +// MMU req gen disabled by default +#ifndef param_mmu_req_gen_en + #define param_mmu_req_gen_en 0 + #define param_mmu_req_gen_lat 0 +#endif + +int main(void) { + cheshire_start(); + + // Clean the exception variable + RVV_TEST_CLEAN_EXCEPTION(); + + // This initialization is controlled through "defines" in the various + // derived tests. + INIT_RVV_TEST_SOC_REGFILE; + VIRTUAL_MEMORY_ON; + STUB_EX_ON; + STUB_NO_EX_LAT(0); // Always exception at every STUB request! + STUB_REQ_RSP_LAT(param_stub_req_rsp_lat); + MMU_REQ_GEN_EN(param_mmu_req_gen_en); + MMU_REQ_GEN_LAT(param_mmu_req_gen_lat); + + // Vector configuration parameters and variables + uint64_t avl = RVV_TEST_AVL(64); + uint64_t vl, vstart_read; + vcsr_dump_t vcsr_state = {0}; + + // Helper variables and arrays + _DTYPE array_load [VLMAX]; + _DTYPE array_store [VLMAX]; + _DTYPE* address_load = array_load; + _DTYPE* address_store = array_store; + + // Enalbe RVV + enable_rvv(); + vcsr_dump ( vcsr_state ); + + ////////////////////////////////////////////////////////////////// + ////////////////////////////////////////////////////////////////// + // START OF TESTS + ////////////////////////////////////////////////////////////////// + ////////////////////////////////////////////////////////////////// + + ////////////////////////////////////////////////////////////////// + // TEST: Exception generation: vector load + ////////////////////////////////////////////////////////////////// + RVV_TEST_INIT( vl, avl ); + + _VLD(v0,address_load) + RVV_TEST_ASSERT_EXCEPTION_EXTENDED(1, address_load, CAUSE_LOAD_PAGE_FAULT) + RVV_TEST_CLEAN_EXCEPTION() + + RVV_TEST_CLEANUP(); + + // ////////////////////////////////////////////////////////////////// + // 
// TEST: Exception generation: vector store + // ////////////////////////////////////////////////////////////////// + RVV_TEST_INIT( vl, avl ); + + _VST(v0, address_store) + RVV_TEST_ASSERT_EXCEPTION_EXTENDED(1, address_store, CAUSE_STORE_PAGE_FAULT) + RVV_TEST_CLEAN_EXCEPTION() + + RVV_TEST_CLEANUP(); + + ////////////////////////////////////////////////////////////////// + // TEST: Exception generation and non-zero vstart: vector load + ////////////////////////////////////////////////////////////////// + RVV_TEST_INIT( vl, avl ); + + // Loop over vstart values + for ( uint64_t vstart_val = 0; vstart_val < vl; vstart_val++ ) { + RVV_TEST_INIT( vl, avl ); + + if (param_stub_req_rsp_lat_ctrl) { + // Random STUB req-rsp latency (minimum value should be 1) + STUB_REQ_RSP_LAT((vstart_val % param_stub_req_rsp_lat) + 1); + } + + asm volatile ("csrs vstart, %0" :: "r"(vstart_val) ); + _VLD(v0, address_load) + RVV_TEST_ASSERT_EXCEPTION_EXTENDED(1, address_load + vstart_val, CAUSE_LOAD_PAGE_FAULT) + RVV_TEST_CLEAN_EXCEPTION() + + vstart_read = -1; + asm volatile ("csrr %0, vstart" : "=r"(vstart_read) ); + RVV_TEST_ASSERT ( vstart_read == vstart_val ) + + RVV_TEST_CLEANUP(); + ret_cnt++; + } + + ////////////////////////////////////////////////////////////////// + // TEST: Exception generation and non-zero vstart: vector store + ////////////////////////////////////////////////////////////////// + RVV_TEST_INIT( vl, avl ); + + // Loop over vstart values + for ( uint64_t vstart_val = 0; vstart_val < vl; vstart_val++ ) { + RVV_TEST_INIT( vl, avl ); + + if (param_stub_req_rsp_lat_ctrl) { + // Random STUB req-rsp latency (minimum value should be 1) + STUB_REQ_RSP_LAT((vstart_val % param_stub_req_rsp_lat) + 1); + } + + asm volatile ("csrs vstart, %0" :: "r"(vstart_val) ); + + _VST(v0, address_store) + RVV_TEST_ASSERT_EXCEPTION_EXTENDED(1, address_store + vstart_val, CAUSE_STORE_PAGE_FAULT) + RVV_TEST_CLEAN_EXCEPTION() + + vstart_read = -1; + asm volatile ("csrr %0, vstart" : 
"=r"(vstart_read) ); + RVV_TEST_ASSERT( vstart_read == vstart_val ); + + RVV_TEST_CLEANUP(); + ret_cnt++; + } + + // Clean-up the SoC CSRs + RESET_SOC_CSR; + +#if (PRINTF == 1) + printf("Test SUCCESS!\r\n"); +#endif + + cheshire_end(); + + return 0; +} diff --git a/cheshire/sw/src/tests/regfile_text_stub_regs.c b/cheshire/sw/src/tests/regfile_text_stub_regs.c new file mode 100644 index 000000000..215f4a43a --- /dev/null +++ b/cheshire/sw/src/tests/regfile_text_stub_regs.c @@ -0,0 +1,68 @@ +// Copyright 2023 ETH Zurich and University of Bologna. +// Licensed under the Apache License, Version 2.0, see LICENSE for details. +// SPDX-License-Identifier: Apache-2.0 +// +// Matteo Perotti + +#include "regs/cheshire.h" +#include "dif/clint.h" +#include "dif/uart.h" +#include "params.h" +#include "util.h" +#include "encoding.h" +#include "rvv_test.h" + +#include "cheshire_util.h" + +// 1 to print test information +#define COND_PRINT 0 + +/* Soc-Level regfile list (defined in rvv_test.h) + rf_virt_mem_en + rf_stub_ex_en + rf_stub_no_ex_lat + rf_req_rsp_lat +*/ + +// Write the SoC-level regfile and verify the values +// We use lightweight char-only uart print +int main(void) { + cheshire_start(); + + // Declare the SoC-level STUB registers + INIT_RVV_TEST_SOC_REGFILE; + + // Write the register file (chars-only because it's easier to print) + *rf_virt_mem_en = '1'; + *rf_stub_ex_en = '2'; + *rf_stub_no_ex_lat = '3'; + *rf_req_rsp_lat = '4'; + + // Read the register file again (check written values) + ASSERT_EQ(*rf_virt_mem_en, '1'); + ASSERT_EQ(*rf_stub_ex_en, '2'); + ASSERT_EQ(*rf_stub_no_ex_lat, '3'); + ASSERT_EQ(*rf_req_rsp_lat, '4'); + +#if (COND_PRINT == 1) + // Initialize UART and print + // Avoid printf to minimize program preload time + PRINT_INIT; + PRINT("SoC-level regfile values:\r\n"); + PRINT_CHAR(*rf_virt_mem_en); + PRINT_CHAR(*rf_stub_ex_en); + PRINT_CHAR(*rf_stub_no_ex_lat); + PRINT_CHAR(*rf_req_rsp_lat); +#endif + + // Clean-up the SoC CSRs + RESET_SOC_CSR; + 
+#if (PRINTF == 1) + printf("Test SUCCESS!\r\n"); +#endif + + cheshire_end(); + + return 0; +} diff --git a/cheshire/sw/src/tests/rvv_test_exceptions.c b/cheshire/sw/src/tests/rvv_test_exceptions.c new file mode 100644 index 000000000..721de3181 --- /dev/null +++ b/cheshire/sw/src/tests/rvv_test_exceptions.c @@ -0,0 +1,207 @@ +// Copyright 2023 ETH Zurich and University of Bologna. +// Licensed under the Apache License, Version 2.0, see LICENSE for details. +// SPDX-License-Identifier: Apache-2.0 +// +// Vincenzo Maisto + +#include "regs/cheshire.h" +#include "dif/clint.h" +#include "dif/uart.h" +#include "params.h" +#include "util.h" +#include "encoding.h" +#include "rvv_test.h" + +#include "cheshire_util.h" + +uint64_t dummy[3]; + +int main(void) { + cheshire_start(); + + // Vector configuration parameters and variables + uint64_t avl = RVV_TEST_AVL(64); + uint64_t vl; + vcsr_dump_t vcsr_state = {0}; + + // Helper variables and arrays + uint64_t array_load [RVV_TEST_AVL(64)]; + uint64_t array_store [RVV_TEST_AVL(64)] = {0}; + uint64_t* address_load = array_load; + uint64_t* address_store = array_store; + uint64_t* address_misaligned; + uint64_t vstart_read; + + RVV_TEST_CLEAN_EXCEPTION(); + INIT_RVV_TEST_SOC_REGFILE; + RESET_SOC_CSR; + + // Enalbe RVV + enable_rvv(); + vcsr_dump ( vcsr_state ); + + ////////////////////////////////////////////////////////////////// + ////////////////////////////////////////////////////////////////// + // START OF TESTS + ////////////////////////////////////////////////////////////////// + ////////////////////////////////////////////////////////////////// + + ////////////////////////////////////////////////////////////////// + // TEST: Legal encoding + ////////////////////////////////////////////////////////////////// + RVV_TEST_INIT( vl, avl ); + + asm volatile("vmv.v.i v0 , 1"); + RVV_TEST_ASSERT_EXCEPTION(0) + + RVV_TEST_CLEANUP(); + + ////////////////////////////////////////////////////////////////// + // TEST: Illegal encoding 
+ ////////////////////////////////////////////////////////////////// + RVV_TEST_INIT( vl, avl ); + + asm volatile("vmv.v.i v1 , 1"); + RVV_TEST_ASSERT_EXCEPTION(1) + RVV_TEST_CLEAN_EXCEPTION() + + RVV_TEST_CLEANUP(); + + ////////////////////////////////////////////////////////////////// + // TEST: vstart update + ////////////////////////////////////////////////////////////////// + RVV_TEST_INIT( vl, avl ); + + vstart_read = -1; + // CSR <-> vector instrucitons + asm volatile ("csrs vstart, 1"); + asm volatile ("csrr %0, vstart" : "=r"(vstart_read)); + RVV_TEST_ASSERT ( vstart_read == (uint64_t)1 ); + + ////////////////////////////////////////////////////////////////// + // TEST: vstart automatic reset + ////////////////////////////////////////////////////////////////// + RVV_TEST_INIT( vl, avl ); + + // NOTE: This relied on non-zero vstart support for arithmetic instructions, i.e., operand request + // NOTE2: supporting vstart != 0 for arithmetic instructions is NOT a spec requirement + asm volatile ("vmv.v.i v24, -1"); + asm volatile ("csrs vstart, 1"); + asm volatile ("vadd.vv v0, v24, v24"); + asm volatile ("csrr %0, vstart" : "=r"(vstart_read)); + RVV_TEST_ASSERT ( vstart_read == (uint64_t)0 ); + + RVV_TEST_CLEANUP(); + + ////////////////////////////////////////////////////////////////// + // TEST: These instructions should WB asap to ROB + ////////////////////////////////////////////////////////////////// + RVV_TEST_INIT( vl, avl ); + + // Vector permutation/arithmetic + asm volatile("vmv.v.i v0 , 1"); + asm volatile("csrr %0 , vl" : "=r"(vl)); + + RVV_TEST_CLEANUP(); + + ////////////////////////////////////////////////////////////////// + // TEST: These intructions should WB to CVA6 only after WB from PEs + ////////////////////////////////////////////////////////////////// + RVV_TEST_INIT( vl, avl ); + + address_load = array_load; + // initialize + for ( uint64_t i = 0; i < vl; i++ ) { + array_load[i] = -i; + } + + // Vector load + _VLD(v24, address_load) + 
// Vector store + _VST(v16, address_store) + // Vector load + _VLD(v8, address_load) + + RVV_TEST_CLEANUP(); + + ////////////////////////////////////////////////////////////////// + // TEST: Legal non-zero vstart on vector instructions + ////////////////////////////////////////////////////////////////// + RVV_TEST_INIT( vl, avl ); + + asm volatile("csrs vstart, 3"); + asm volatile("vadd.vv v24 , v16, v16"); + RVV_TEST_ASSERT_EXCEPTION(0) + + RVV_TEST_CLEANUP(); + + ////////////////////////////////////////////////////////////////// + // TEST: Legal non-zero vstart on vector CSR + ////////////////////////////////////////////////////////////////// + RVV_TEST_INIT( vl, avl ); + + asm volatile("csrs vstart, 3"); + asm volatile("vsetvli x0 , x0, e64, m8, ta, ma" ); + RVV_TEST_ASSERT_EXCEPTION(0) + + asm volatile("csrs vstart, 22"); + _VLD(v24, address_load) + RVV_TEST_ASSERT_EXCEPTION(0) + + RVV_TEST_CLEANUP(); + + ////////////////////////////////////////////////////////////////// + // TEST: EEW misaligned loads + ////////////////////////////////////////////////////////////////// + RVV_TEST_INIT( vl, 1 ); + + // Get a valid byte-misaligned address + address_misaligned = (void*)(((uint64_t)(&dummy[1]) | 1)); + // Exception only for EEW > 8 + _VLD(v16, address_misaligned) + if (EEW > 8) { + RVV_TEST_ASSERT_EXCEPTION(1) + } else { + RVV_TEST_ASSERT_EXCEPTION(0) + } + RVV_TEST_CLEAN_EXCEPTION() + + RVV_TEST_CLEANUP(); + + ////////////////////////////////////////////////////////////////// + // TEST: EEW misaligned stores + ////////////////////////////////////////////////////////////////// + RVV_TEST_INIT( vl, 1 ); + + // Get a byte-misaligned address + address_misaligned = (void*)(((uint64_t)(&dummy[1]) | 1)); + // Exception only for EEW > 8 + _VST(v24, address_misaligned) + if (EEW > 8) { + RVV_TEST_ASSERT_EXCEPTION(1) + } else { + RVV_TEST_ASSERT_EXCEPTION(0) + } + RVV_TEST_CLEAN_EXCEPTION() + + RVV_TEST_CLEANUP(); + + 
//////////////////////////////////////////////////////////////////// + // Missing tests for unimplemented features: + // TEST: Illegal non-zero vstart + //////////////////////////////////////////////////////////////////// + + ////////////////////////////////////////////////////////////////// + ////////////////////////////////////////////////////////////////// + // END OF TESTS + ////////////////////////////////////////////////////////////////// + ////////////////////////////////////////////////////////////////// + +#if (PRINTF == 1) + printf("Test SUCCESS!\r\n"); +#endif + + cheshire_end(); + + return 0; +} diff --git a/cheshire/sw/src/tests/rvv_test_mmu_stub_unit_stride_comprehensive_page_fault_var_lat.c b/cheshire/sw/src/tests/rvv_test_mmu_stub_unit_stride_comprehensive_page_fault_var_lat.c new file mode 100644 index 000000000..bfb833c11 --- /dev/null +++ b/cheshire/sw/src/tests/rvv_test_mmu_stub_unit_stride_comprehensive_page_fault_var_lat.c @@ -0,0 +1,16 @@ +// Copyright 2023 ETH Zurich and University of Bologna. +// Licensed under the Apache License, Version 2.0, see LICENSE for details. +// SPDX-License-Identifier: Apache-2.0 +// +// Matteo Perotti +// Vincenzo Maisto + +// Tunable parameters +// param_stub_ex_ctrl. 0: no exceptions, 1: always exceptions, 2: random exceptions +#define param_stub_ex_ctrl 1 + +// param_stub_req_rsp_lat_ctrl. 
0: fixed latency (== param_stub_req_rsp_lat), 1: random latency (max == param_stub_req_rsp_lat) +#define param_stub_req_rsp_lat_ctrl 1 +#define param_stub_req_rsp_lat 10 + +#include "rvv_test_mmu_stub_unit_stride_comprehensive.c.body" diff --git a/cheshire/sw/src/tests/rvv_test_mmu_stub_unit_stride_comprehensive_page_fault_var_lat_var_ex.c b/cheshire/sw/src/tests/rvv_test_mmu_stub_unit_stride_comprehensive_page_fault_var_lat_var_ex.c new file mode 100644 index 000000000..86c991b03 --- /dev/null +++ b/cheshire/sw/src/tests/rvv_test_mmu_stub_unit_stride_comprehensive_page_fault_var_lat_var_ex.c @@ -0,0 +1,16 @@ +// Copyright 2023 ETH Zurich and University of Bologna. +// Licensed under the Apache License, Version 2.0, see LICENSE for details. +// SPDX-License-Identifier: Apache-2.0 +// +// Matteo Perotti +// Vincenzo Maisto + +// Tunable parameters +// param_stub_ex_ctrl. 0: no exceptions, 1: always exceptions, 2: random exceptions +#define param_stub_ex_ctrl 2 + +// param_stub_req_rsp_lat_ctrl. 0: fixed latency (== param_stub_req_rsp_lat), 1: random latency (max == param_stub_req_rsp_lat) +#define param_stub_req_rsp_lat_ctrl 1 +#define param_stub_req_rsp_lat 10 + +#include "rvv_test_mmu_stub_unit_stride_comprehensive.c.body" diff --git a/cheshire/sw/src/tests/rvv_test_mmu_stub_unit_stride_comprehensive_var_lat.c b/cheshire/sw/src/tests/rvv_test_mmu_stub_unit_stride_comprehensive_var_lat.c new file mode 100644 index 000000000..48edfd5ea --- /dev/null +++ b/cheshire/sw/src/tests/rvv_test_mmu_stub_unit_stride_comprehensive_var_lat.c @@ -0,0 +1,16 @@ +// Copyright 2023 ETH Zurich and University of Bologna. +// Licensed under the Apache License, Version 2.0, see LICENSE for details. +// SPDX-License-Identifier: Apache-2.0 +// +// Matteo Perotti +// Vincenzo Maisto + +// Tunable parameters +// param_stub_ex_ctrl. 0: no exceptions, 1: always exceptions, 2: random exceptions +#define param_stub_ex_ctrl 0 + +// param_stub_req_rsp_lat_ctrl. 
0: fixed latency (== param_stub_req_rsp_lat), 1: random latency (max == param_stub_req_rsp_lat) +#define param_stub_req_rsp_lat_ctrl 1 +#define param_stub_req_rsp_lat 10 + +#include "rvv_test_mmu_stub_unit_stride_comprehensive.c.body" diff --git a/cheshire/sw/src/tests/rvv_test_mmu_stub_unit_stride_corner_cases.c b/cheshire/sw/src/tests/rvv_test_mmu_stub_unit_stride_corner_cases.c new file mode 100644 index 000000000..798b04413 --- /dev/null +++ b/cheshire/sw/src/tests/rvv_test_mmu_stub_unit_stride_corner_cases.c @@ -0,0 +1,277 @@ +// Copyright 2023 ETH Zurich and University of Bologna. +// Licensed under the Apache License, Version 2.0, see LICENSE for details. +// SPDX-License-Identifier: Apache-2.0 +// +// Matteo Perotti +// Vincenzo Maisto + +#include "regs/cheshire.h" +#include "dif/clint.h" +#include "dif/uart.h" +#include "params.h" +#include "util.h" +#include "encoding.h" +#include "rvv_test.h" + +#include "cheshire_util.h" + +#define INIT_NONZERO_VAL_V0 99 + +// Derived parameters +uint64_t stub_req_rsp_lat = 10; + +int main(void) { + cheshire_start(); + + // This initialization is controlled through "defines" in the various + // derived tests. 
+ INIT_RVV_TEST_SOC_REGFILE; + VIRTUAL_MEMORY_ON; + STUB_EX_OFF; + STUB_REQ_RSP_LAT((stub_req_rsp_lat++ % MAX_LAT_P2) + 1); + + // Vector configuration parameters and variables + uint64_t avl_original = RVV_TEST_AVL(64); + uint64_t vl, vstart_read; + vcsr_dump_t vcsr_state = {0}; + + // Helper variables and arrays + _DTYPE array_load [VLMAX]; + _DTYPE array_store [VLMAX]; + _DTYPE* address_load = array_load; + _DTYPE* address_store = array_store; + + // Enable RVV + enable_rvv(); + vcsr_dump ( vcsr_state ); + + ////////////////////////////////////////////////////////////////// + ////////////////////////////////////////////////////////////////// + // START OF TESTS + ////////////////////////////////////////////////////////////////// + ////////////////////////////////////////////////////////////////// + + ////////////////////////////////////////////////////////////////// + // TEST: vstore, vl >= 0, vstart >= vl + ////////////////////////////////////////////////////////////////// + + // Loop through different avl, from 0 to avlmax + for (uint64_t avl = 0; avl <= ARA_NR_LANES + 2; avl++) { + // Reset vl, vstart, reset exceptions. + RVV_TEST_INIT(vl, avl); + for (uint64_t vstart_val = vl; vstart_val <= vl + ARA_NR_LANES + 2; vstart_val++) { + // Reset vl, vstart, reset exceptions. 
+ RVV_TEST_INIT(vl, avl); + STUB_REQ_RSP_LAT((stub_req_rsp_lat++ % MAX_LAT_P2) + 1); + + // Init memory + for (uint64_t i = 0; i < ARA_NR_LANES + 1; i++) { + address_store[i] = INIT_NONZERO_VAL_ST; + } + asm volatile("vmv.v.x v0, %0" :: "r" (INIT_NONZERO_VAL_V0)); + + // Setup vstart + asm volatile("csrs vstart, %0" :: "r"(vstart_val)); + // Store + _VST(v0, address_store) + + // Check that vstart was reset at zero + vstart_read = -1; + asm volatile("csrr %0, vstart" : "=r"(vstart_read)); + ASSERT_EQ(vstart_read, 0) + // Check that there was no exception + RVV_TEST_ASSERT_EXCEPTION(0) + RVV_TEST_CLEAN_EXCEPTION() + + // Elements in memory should not have been touched + for (uint64_t i = 0; i < ARA_NR_LANES + 1; i++) { + ASSERT_EQ(address_store[i], INIT_NONZERO_VAL_ST) + } + + // Clean-up + RVV_TEST_CLEANUP(); + + ret_cnt++; + } + } + + ////////////////////////////////////////////////////////////////// + // TEST: vload, vl >= 0, vstart >= vl + ////////////////////////////////////////////////////////////////// + + // Loop through different avl, from 0 to avlmax + for (uint64_t avl = 0; avl <= ARA_NR_LANES + 2; avl++) { + // Reset vl, vstart, reset exceptions. + RVV_TEST_INIT(vl, avl); + for (uint64_t vstart_val = vl; vstart_val <= vl + ARA_NR_LANES + 2; vstart_val++) { + // Reset vl, vstart, reset exceptions. 
+ RVV_TEST_INIT(vl, avl); + STUB_REQ_RSP_LAT((stub_req_rsp_lat++ % MAX_LAT_P2) + 1); + + // Init memory + for (uint64_t i = 0; i < ARA_NR_LANES + 1; i++) { + address_store[i] = INIT_NONZERO_VAL_ST; + } + for (uint64_t i = 0; i < ARA_NR_LANES + 1; i++) { + address_load[i] = vl + vstart_val + i + MAGIC_NUM; + } + asm volatile("vmv.v.x v0, %0" :: "r" (INIT_NONZERO_VAL_V0)); + + // Setup vstart + asm volatile("csrs vstart, %0" :: "r"(vstart_val)); + // Load the whole register (nothing should happen) + _VLD(v0, address_load) + + // Check that vstart was reset at zero + vstart_read = -1; + asm volatile("csrr %0, vstart" : "=r"(vstart_read)); + ASSERT_EQ(vstart_read, 0) + // Check that there was no exception + RVV_TEST_ASSERT_EXCEPTION(0) + RVV_TEST_CLEAN_EXCEPTION() + + // Store the old content of the v0 reg + RVV_TEST_INIT(vl, ARA_NR_LANES + 2); + _VST(v0, address_store) + + // We should have + for (uint64_t i = 0; i < ARA_NR_LANES + 1; i++) { + ASSERT_EQ(address_store[i], INIT_NONZERO_VAL_V0) + } + + // Clean-up + RVV_TEST_CLEANUP(); + + ret_cnt++; + } + } + + ////////////////////////////////////////////////////////////////// + // TEST: previous tests should not mess up with regular stores + ////////////////////////////////////////////////////////////////// + + // Loop through different avl, from 0 to avlmax + for (uint64_t avl = 0; avl <= ARA_NR_LANES + 2; avl++) { + // Reset vl, vstart, reset exceptions. + RVV_TEST_INIT(vl, avl); + for (uint64_t vstart_val = 0; vstart_val < vl; vstart_val++) { + // Reset vl, vstart, reset exceptions. 
+ RVV_TEST_INIT(vl, avl); + STUB_REQ_RSP_LAT((stub_req_rsp_lat++ % MAX_LAT_P2) + 1); + + // Init memory + for (uint64_t i = 0; i < vl; i++) { + address_store[i] = INIT_NONZERO_VAL_ST; + } + for (uint64_t i = 0; i < vl; i++) { + address_load[i] = vl + vstart_val + i + MAGIC_NUM; + } + asm volatile("vmv.v.x v0, %0" :: "r" (INIT_NONZERO_VAL_V0)); + + // Load the whole register + _VLD(v0, address_load) + + // Setup vstart + asm volatile("csrs vstart, %0" :: "r"(vstart_val)); + + // Store + _VST(v0, address_store) + + // Check that vstart was reset at zero + vstart_read = -1; + asm volatile("csrr %0, vstart" : "=r"(vstart_read)); + ASSERT_EQ(vstart_read, 0) + // Check that there was no exception + RVV_TEST_ASSERT_EXCEPTION(0) + RVV_TEST_CLEAN_EXCEPTION() + + // Prestart elements + for (uint64_t i = 0; i < vstart_val; i++) { + ASSERT_EQ(address_store[i], INIT_NONZERO_VAL_ST) + } + + // Body elements + for (uint64_t i = vstart_val; i < vl; i++) { + ASSERT_EQ(address_store[i], address_load[i]) + } + + // Clean-up + RVV_TEST_CLEANUP(); + + ret_cnt++; + } + } + + ////////////////////////////////////////////////////////////////// + // TEST: previous tests should not mess up with regular loads + ////////////////////////////////////////////////////////////////// + + // Loop through different avl, from 0 to avlmax + for (uint64_t avl = 0; avl <= ARA_NR_LANES + 2; avl++) { + // Reset vl, vstart, reset exceptions. + RVV_TEST_INIT(vl, avl); + for (uint64_t vstart_val = 0; vstart_val < vl; vstart_val++) { + // Reset vl, vstart, reset exceptions. 
+ RVV_TEST_INIT(vl, avl); + STUB_REQ_RSP_LAT((stub_req_rsp_lat++ % MAX_LAT_P2) + 1); + + // Init memory + for (uint64_t i = 0; i < vl; i++) { + address_store[i] = INIT_NONZERO_VAL_ST; + } + for (uint64_t i = 0; i < vl; i++) { + address_load[i] = vl + vstart_val + i + MAGIC_NUM; + } + asm volatile("vmv.v.x v0, %0" :: "r" (INIT_NONZERO_VAL_V0)); + + // Setup vstart + asm volatile("csrs vstart, %0" :: "r"(vstart_val)); + // Load the whole register (nothing should happen) + _VLD(v0, address_load) + + // Check that vstart was reset at zero + vstart_read = -1; + asm volatile("csrr %0, vstart" : "=r"(vstart_read)); + ASSERT_EQ(vstart_read, 0) + // Check that there was no exception + RVV_TEST_ASSERT_EXCEPTION(0) + RVV_TEST_CLEAN_EXCEPTION() + + // Store + _VST(v0, address_store) + + // Prestart elements + for (uint64_t i = 0; i < vstart_val; i++) { + ASSERT_EQ(address_store[i], INIT_NONZERO_VAL_V0) + } + + // Body elements + for (uint64_t i = vstart_val; i < vl; i++) { + ASSERT_EQ(address_store[i], address_load[i]) + } + + // Clean-up + RVV_TEST_CLEANUP(); + + ret_cnt++; + } + } + + // Clean-up the SoC CSRs + RESET_SOC_CSR; + + ////////////////////////////////////////////////////////////////// + ////////////////////////////////////////////////////////////////// + // END OF TESTS + ////////////////////////////////////////////////////////////////// + ////////////////////////////////////////////////////////////////// + +#if (PRINTF == 1) + printf("Test SUCCESS!\r\n"); +#endif + + cheshire_end(); + + // If we did not return before, the test passed + return RET_CODE_SUCCESS; +} diff --git a/cheshire/sw/src/tests/rvv_test_mmu_stub_unit_stride_ld_comprehensive_page_fault_var_lat.c b/cheshire/sw/src/tests/rvv_test_mmu_stub_unit_stride_ld_comprehensive_page_fault_var_lat.c new file mode 100644 index 000000000..2999ba4a3 --- /dev/null +++ b/cheshire/sw/src/tests/rvv_test_mmu_stub_unit_stride_ld_comprehensive_page_fault_var_lat.c @@ -0,0 +1,16 @@ +// Copyright 2023 ETH Zurich and 
University of Bologna. +// Licensed under the Apache License, Version 2.0, see LICENSE for details. +// SPDX-License-Identifier: Apache-2.0 +// +// Matteo Perotti +// Vincenzo Maisto + +// Tunable parameters +// param_stub_ex_ctrl. 0: no exceptions, 1: always exceptions, 2: random exceptions +#define param_stub_ex_ctrl 1 + +// param_stub_req_rsp_lat_ctrl. 0: fixed latency (== param_stub_req_rsp_lat), 1: random latency (max == param_stub_req_rsp_lat) +#define param_stub_req_rsp_lat_ctrl 1 +#define param_stub_req_rsp_lat 10 + +#include "rvv_test_mmu_stub_unit_stride_ld_comprehensive.c.body" diff --git a/cheshire/sw/src/tests/rvv_test_mmu_stub_unit_stride_ld_comprehensive_page_fault_var_lat_var_ex.c b/cheshire/sw/src/tests/rvv_test_mmu_stub_unit_stride_ld_comprehensive_page_fault_var_lat_var_ex.c new file mode 100644 index 000000000..0023d757b --- /dev/null +++ b/cheshire/sw/src/tests/rvv_test_mmu_stub_unit_stride_ld_comprehensive_page_fault_var_lat_var_ex.c @@ -0,0 +1,16 @@ +// Copyright 2023 ETH Zurich and University of Bologna. +// Licensed under the Apache License, Version 2.0, see LICENSE for details. +// SPDX-License-Identifier: Apache-2.0 +// +// Matteo Perotti +// Vincenzo Maisto + +// Tunable parameters +// param_stub_ex_ctrl. 0: no exceptions, 1: always exceptions, 2: random exceptions +#define param_stub_ex_ctrl 2 + +// param_stub_req_rsp_lat_ctrl. 
0: fixed latency (== param_stub_req_rsp_lat), 1: random latency (max == param_stub_req_rsp_lat) +#define param_stub_req_rsp_lat_ctrl 1 +#define param_stub_req_rsp_lat 10 + +#include "rvv_test_mmu_stub_unit_stride_ld_comprehensive.c.body" diff --git a/cheshire/sw/src/tests/rvv_test_mmu_stub_unit_stride_ld_comprehensive_var_lat.c b/cheshire/sw/src/tests/rvv_test_mmu_stub_unit_stride_ld_comprehensive_var_lat.c new file mode 100644 index 000000000..cd73bd3a8 --- /dev/null +++ b/cheshire/sw/src/tests/rvv_test_mmu_stub_unit_stride_ld_comprehensive_var_lat.c @@ -0,0 +1,16 @@ +// Copyright 2023 ETH Zurich and University of Bologna. +// Licensed under the Apache License, Version 2.0, see LICENSE for details. +// SPDX-License-Identifier: Apache-2.0 +// +// Matteo Perotti +// Vincenzo Maisto + +// Tunable parameters +// param_stub_ex_ctrl. 0: no exceptions, 1: always exceptions, 2: random exceptions +#define param_stub_ex_ctrl 0 + +// param_stub_req_rsp_lat_ctrl. 0: fixed latency (== param_stub_req_rsp_lat), 1: random latency (max == param_stub_req_rsp_lat) +#define param_stub_req_rsp_lat_ctrl 1 +#define param_stub_req_rsp_lat 10 + +#include "rvv_test_mmu_stub_unit_stride_ld_comprehensive.c.body" diff --git a/cheshire/sw/src/tests/rvv_test_mmu_stub_unit_stride_reshuffle.c b/cheshire/sw/src/tests/rvv_test_mmu_stub_unit_stride_reshuffle.c new file mode 100644 index 000000000..bb165c5e1 --- /dev/null +++ b/cheshire/sw/src/tests/rvv_test_mmu_stub_unit_stride_reshuffle.c @@ -0,0 +1,237 @@ +// Copyright 2023 ETH Zurich and University of Bologna. +// Licensed under the Apache License, Version 2.0, see LICENSE for details. 
+// SPDX-License-Identifier: Apache-2.0 +// +// Matteo Perotti + +#include "regs/cheshire.h" +#include "dif/clint.h" +#include "dif/uart.h" +#include "params.h" +#include "util.h" +#include "encoding.h" +#include "rvv_test.h" + +#include "cheshire_util.h" + +#if (EXTENSIVE_TEST == 1) +#define VL_LIMIT_LOW ELMMAX +#define VL_LIMIT_HIGH 0 +#define VSTART_LIMIT_LOW vl + 1 +#define VSTART_LIMIT_HIGH 0 +#else +#define VL_LIMIT_LOW 3*ARA_NR_LANES + 1 +#define VL_LIMIT_HIGH ELMMAX - (3*ARA_NR_LANES + 1) +#define VSTART_LIMIT_LOW 2*ARA_NR_LANES + 1 +#define VSTART_LIMIT_HIGH vl - 2*ARA_NR_LANES - 1 +#endif + +#define INIT_NONZERO_VAL_V0 99 +#define INIT_NONZERO_VAL_V8 84 +#define INIT_NONZERO_VAL_ST_0 44 +#define INIT_NONZERO_VAL_ST_1 65 + +// Derived parameters +uint64_t stub_req_rsp_lat = 10; + +// If lanes == 8 and eew == 8, these vectors are too large to be instantiated in the stack. +// In all the other cases, the stack is the preferred choice since everything outside of the +// stack should be preloaded with the slow JTAG, and the simulation time increases +#if !((ARA_NR_LANES < 8) || (EEW > 8)) + // Helper variables and arrays + _DTYPE array_load [ELMMAX]; + _DTYPE array_store_0 [ELMMAX]; + _DTYPE array_store_1 [ELMMAX]; +#endif + +// Check an array in the byte range [start_byte, end_byte) to see if it corresponds to a repetition of +// gold_size-byte gold values. 
For example: +// arr: 0x88 0x4A 0x32 0x4A +// gold: 0x32 0x4A +// gold_size: 2 [byte] +// start_byte: 0 (included) +// end_byte: 3 (non included) +// return value: 1 +int check_byte_arr(const void* arr, uint64_t start_byte, uint64_t end_byte, int64_t gold, uint64_t gold_size) { + const uint8_t* mem_bytes = (const uint8_t*)arr; + + // Iterate over each byte of the array + for (uint64_t i = start_byte; i < end_byte; i++) { + // Dynamically calculate the expected byte + uint8_t expected_byte = (gold >> ((i % gold_size) * 8)) & 0xFF; + if (expected_byte != mem_bytes[i]) return 0; + } + // Everything's good + return 1; +} + +int main(void) { + cheshire_start(); + + // Clean the exception variable + RVV_TEST_CLEAN_EXCEPTION(); + + // This initialization is controlled through "defines" in the various + // derived tests. + INIT_RVV_TEST_SOC_REGFILE; + VIRTUAL_MEMORY_ON; + STUB_EX_OFF; + STUB_REQ_RSP_LAT((stub_req_rsp_lat++ % MAX_LAT_P2) + 1); + + // Vector configuration parameters and variables + uint64_t avl_original = RVV_TEST_AVL(64); + uint64_t vl, vstart_read; + vcsr_dump_t vcsr_state = {0}; + +// See note above +#if (ARA_NR_LANES < 8) || (EEW > 8) + // Helper variables and arrays + _DTYPE array_load [ELMMAX]; + _DTYPE array_store_0 [ELMMAX]; + _DTYPE array_store_1 [ELMMAX]; +#endif + + _DTYPE* address_load = array_load; + _DTYPE* address_store_0 = array_store_0; + _DTYPE* address_store_1 = array_store_1; + + // Enalbe RVV + enable_rvv(); + vcsr_dump ( vcsr_state ); + + ////////////////////////////////////////////////////////////////// + ////////////////////////////////////////////////////////////////// + // START OF TESTS + ////////////////////////////////////////////////////////////////// + ////////////////////////////////////////////////////////////////// + + for (uint64_t ew = 0; ew < 4; ew++) { + // Loop through different avl, from 0 to avlmax + for (uint64_t avl = 0; (avl <= VL_LIMIT_LOW || avl >= VL_LIMIT_HIGH) && avl <= ELMMAX; avl++) { + // Reset vl, vstart, 
reset exceptions. + RVV_TEST_INIT(vl, avl); + for (uint64_t vstart_val = 0; (vstart_val <= VSTART_LIMIT_LOW || vstart_val >= VSTART_LIMIT_HIGH) && vstart_val < vl; vstart_val++) { + + // Calculate vl and vstart byte in memory for fixed EEW + // Original encoding of vregs, before shuffling + uint64_t eew_src = 1 << (3 - ew); + // Post-shuffle vregs encoding + uint64_t eew_dst = EEW / 8; + // vstart and vl bytes in the memory array + uint64_t vstart_byte = vstart_val * eew_dst; + uint64_t vl_byte = vl * eew_dst; + + // Reset vl, vstart, reset exceptions. + RVV_TEST_INIT(vl, avl); + // Random latency + STUB_REQ_RSP_LAT((stub_req_rsp_lat++ % MAX_LAT_P2) + 1); + + // Set up the source EEW and reset v0 and v8 with same encoding + switch(ew) { + case 0: + _VSETVLI_64(vl, -1) + break; + case 1: + _VSETVLI_32(vl, -1) + break; + case 2: + _VSETVLI_16(vl, -1) + break; + default: + _VSETVLI_8(vl, -1) + } + asm volatile("vmv.v.x v0, %0" :: "r" (INIT_NONZERO_VAL_V0)); + asm volatile("vmv.v.x v8, %0" :: "r" (INIT_NONZERO_VAL_V8)); + + // Set up the target EEW + _VSETVLI(vl, avl) + + // Init memory + for (uint64_t i = 0; i < vl; i++) { + address_store_0[i] = INIT_NONZERO_VAL_ST_0; + } + for (uint64_t i = 0; i < vl; i++) { + address_store_1[i] = INIT_NONZERO_VAL_ST_1; + } + for (uint64_t i = 0; i < vl; i++) { + address_load[i] = vl + vstart_val + MAGIC_NUM; + } + + // Force V8 reshuffle with target EEW + asm volatile("vadd.vv v24, v8, v8"); + + // Setup vstart + asm volatile("csrs vstart, %0" :: "r"(vstart_val)); + // Force a load-triggered reshuffle when vstart >= 0 + _VLD(v0, address_load) + + // Check that vstart is correctly reset at zero + vstart_read = -1; + asm volatile("csrr %0, vstart" : "=r"(vstart_read)); + ASSERT_EQ(vstart_read, 0) + + // Check that there was no exception + RVV_TEST_ASSERT_EXCEPTION(0) + RVV_TEST_CLEAN_EXCEPTION() + + // Store v0 (no reshuffle here) + _VST(v0, address_store_0) + + // Setup vstart + asm volatile("csrs vstart, %0" :: "r"(vstart_val)); + + 
// Store v8 (no reshuffle here) + _VST(v8, address_store_1) + + // Load test - prestart + int retval = check_byte_arr(address_store_0, 0, vstart_byte, INIT_NONZERO_VAL_V0, eew_src); + ASSERT_TRUE(retval); + + // Load test - body + retval = check_byte_arr(address_store_0, vstart_byte, vl_byte, address_load[0], eew_dst); + ASSERT_TRUE(retval); + + // Store test - prestart + retval = check_byte_arr(address_store_1, 0, vstart_byte, INIT_NONZERO_VAL_ST_1, eew_dst); + ASSERT_TRUE(retval); + + // Store test - body + retval = check_byte_arr(address_store_1, vstart_byte, vl_byte, INIT_NONZERO_VAL_V8, eew_src); + ASSERT_TRUE(retval); + + // Clean-up + RVV_TEST_CLEANUP(); + + // Jump from limit low to limit high if limit high is higher than low + if ((VSTART_LIMIT_LOW) < (VSTART_LIMIT_HIGH)) + if (vstart_val == VSTART_LIMIT_LOW) + vstart_val = VSTART_LIMIT_HIGH; + + ret_cnt++; + } + + // Jump from limit low to limit high if limit high is higher than low + if ((VL_LIMIT_LOW) < (VL_LIMIT_HIGH)) + if (avl == VL_LIMIT_LOW) + avl = VL_LIMIT_HIGH; + } + } + + // Clean-up the SoC CSRs + RESET_SOC_CSR; + + ////////////////////////////////////////////////////////////////// + ////////////////////////////////////////////////////////////////// + // END OF TESTS + ////////////////////////////////////////////////////////////////// + ////////////////////////////////////////////////////////////////// + +#if (PRINTF == 1) + printf("Test SUCCESS!\r\n"); +#endif + + cheshire_end(); + + // If we did not return before, the test passed + return RET_CODE_SUCCESS; +} diff --git a/cheshire/sw/src/tests/rvv_test_mmu_stub_unit_stride_st_reshuffle.c b/cheshire/sw/src/tests/rvv_test_mmu_stub_unit_stride_st_reshuffle.c new file mode 100644 index 000000000..18919e85d --- /dev/null +++ b/cheshire/sw/src/tests/rvv_test_mmu_stub_unit_stride_st_reshuffle.c @@ -0,0 +1,221 @@ +// Copyright 2023 ETH Zurich and University of Bologna. +// Licensed under the Apache License, Version 2.0, see LICENSE for details. 
+// SPDX-License-Identifier: Apache-2.0 +// +// Matteo Perotti + +#include "regs/cheshire.h" +#include "dif/clint.h" +#include "dif/uart.h" +#include "params.h" +#include "util.h" +#include "encoding.h" +#include "rvv_test.h" + +#include "cheshire_util.h" + +#if (EXTENSIVE_TEST == 1) +#define VL_LIMIT_LOW ELMMAX +#define VL_LIMIT_HIGH 0 +#define VSTART_LIMIT_LOW vl + 1 +#define VSTART_LIMIT_HIGH 0 +#else +#define VL_LIMIT_LOW 3*ARA_NR_LANES + 1 +#define VL_LIMIT_HIGH ELMMAX - (3*ARA_NR_LANES + 1) +#define VSTART_LIMIT_LOW 2*ARA_NR_LANES + 1 +#define VSTART_LIMIT_HIGH vl - 2*ARA_NR_LANES - 1 +#endif + +#define INIT_NONZERO_VAL_V0 99 +#define INIT_NONZERO_VAL_V8 84 +#define INIT_NONZERO_VAL_ST_0 44 +#define INIT_NONZERO_VAL_ST_1 65 + +// Derived parameters +uint64_t stub_req_rsp_lat = 10; + +// If lanes == 8 and eew == 8, these vectors are too large to be instantiated in the stack. +// In all the other cases, the stack is the preferred choice since everything outside of the +// stack should be preloaded with the slow JTAG, and the simulation time increases +#if !((ARA_NR_LANES < 8) || (EEW > 8)) + // Helper variables and arrays + _DTYPE array_load [ELMMAX]; + _DTYPE array_store_0 [ELMMAX]; + _DTYPE array_store_1 [ELMMAX]; +#endif + +// Check an array in the byte range [start_byte, end_byte) to see if it corresponds to a repetition of +// gold_size-byte gold values. 
For example: +// arr: 0x88 0x4A 0x32 0x4A +// gold: 0x32 0x4A +// gold_size: 2 [byte] +// start_byte: 0 (included) +// end_byte: 3 (non included) +// return value: 1 +int check_byte_arr(const void* arr, uint64_t start_byte, uint64_t end_byte, int64_t gold, uint64_t gold_size) { + const uint8_t* mem_bytes = (const uint8_t*)arr; + + // Iterate over each byte of the array + for (uint64_t i = start_byte; i < end_byte; i++) { + // Dynamically calculate the expected byte + uint8_t expected_byte = (gold >> ((i % gold_size) * 8)) & 0xFF; + if (expected_byte != mem_bytes[i]) return 0; + } + // Everything's good + return 1; +} + +int main(void) { + cheshire_start(); + + // Clean the exception variable + RVV_TEST_CLEAN_EXCEPTION(); + + // This initialization is controlled through "defines" in the various + // derived tests. + INIT_RVV_TEST_SOC_REGFILE; + VIRTUAL_MEMORY_ON; + STUB_EX_OFF; + STUB_REQ_RSP_LAT((stub_req_rsp_lat++ % MAX_LAT_P2) + 1); + + // Vector configuration parameters and variables + uint64_t avl_original = RVV_TEST_AVL(64); + uint64_t vl, vstart_read; + vcsr_dump_t vcsr_state = {0}; + +// See note above +#if (ARA_NR_LANES < 8) || (EEW > 8) + // Helper variables and arrays + _DTYPE array_load [ELMMAX]; + _DTYPE array_store_0 [ELMMAX]; + _DTYPE array_store_1 [ELMMAX]; +#endif + + _DTYPE* address_load = array_load; + _DTYPE* address_store_0 = array_store_0; + _DTYPE* address_store_1 = array_store_1; + + // Enalbe RVV + enable_rvv(); + vcsr_dump ( vcsr_state ); + + ////////////////////////////////////////////////////////////////// + ////////////////////////////////////////////////////////////////// + // START OF TESTS + ////////////////////////////////////////////////////////////////// + ////////////////////////////////////////////////////////////////// + + for (uint64_t ew = 0; ew < 4; ew++) { + // Loop through different avl, from 0 to avlmax + for (uint64_t avl = 0; (avl <= VL_LIMIT_LOW || avl >= VL_LIMIT_HIGH) && avl <= ELMMAX; avl++) { + // Reset vl, vstart, 
reset exceptions. + RVV_TEST_INIT(vl, avl); + for (uint64_t vstart_val = 0; (vstart_val <= VSTART_LIMIT_LOW || vstart_val >= VSTART_LIMIT_HIGH) && vstart_val < vl; vstart_val++) { + + // Calculate vl and vstart byte in memory for fixed EEW + // Original encoding of vregs, before shuffling + uint64_t eew_src = 1 << (3 - ew); + // Post-shuffle vregs encoding + uint64_t eew_dst = EEW / 8; + // vstart and vl bytes in the memory array + uint64_t vstart_byte = vstart_val * eew_dst; + uint64_t vl_byte = vl * eew_dst; + + // Reset vl, vstart, reset exceptions. + RVV_TEST_INIT(vl, avl); + // Random latency + STUB_REQ_RSP_LAT((stub_req_rsp_lat++ % MAX_LAT_P2) + 1); + + // Set up the source EEW and reset v0 and v8 with same encoding + switch(ew) { + case 0: + _VSETVLI_64(vl, -1) + break; + case 1: + _VSETVLI_32(vl, -1) + break; + case 2: + _VSETVLI_16(vl, -1) + break; + default: + _VSETVLI_8(vl, -1) + } + asm volatile("vmv.v.x v8, %0" :: "r" (INIT_NONZERO_VAL_V8)); + + // Set up the target EEW + _VSETVLI(vl, avl) + + // Init memory + for (uint64_t i = 0; i < vl; i++) { + address_store_1[i] = INIT_NONZERO_VAL_ST_1; + } + + // Setup vstart + asm volatile("csrs vstart, %0" :: "r"(vstart_val)); + + // Store v8 (force reshuffle) + _VST(v8, address_store_1) + + *rf_rvv_debug_reg = 0xF0000001; + + // Check that vstart is correctly reset at zero + vstart_read = -1; + asm volatile("csrr %0, vstart" : "=r"(vstart_read)); + ASSERT_EQ(vstart_read, 0) + + *rf_rvv_debug_reg = 0xF0000002; + + // Check that there was no exception + RVV_TEST_ASSERT_EXCEPTION(0) + RVV_TEST_CLEAN_EXCEPTION() + + *rf_rvv_debug_reg = 0xF0000003; + + // Store test - prestart + int retval = check_byte_arr(address_store_1, 0, vstart_byte, INIT_NONZERO_VAL_ST_1, eew_dst); + ASSERT_TRUE(retval); + + *rf_rvv_debug_reg = 0xF0000004; + + // Store test - body + retval = check_byte_arr(address_store_1, vstart_byte, vl_byte, INIT_NONZERO_VAL_V8, eew_src); + ASSERT_TRUE(retval); + + *rf_rvv_debug_reg = 0xF0000005; + + // 
Clean-up + RVV_TEST_CLEANUP(); + + // Jump from limit low to limit high if limit high is higher than low + if ((VSTART_LIMIT_LOW) < (VSTART_LIMIT_HIGH)) + if (vstart_val == VSTART_LIMIT_LOW) + vstart_val = VSTART_LIMIT_HIGH; + + ret_cnt++; + } + + // Jump from limit low to limit high if limit high is higher than low + if ((VL_LIMIT_LOW) < (VL_LIMIT_HIGH)) + if (avl == VL_LIMIT_LOW) + avl = VL_LIMIT_HIGH; + } + } + + // Clean-up the SoC CSRs + RESET_SOC_CSR; + + ////////////////////////////////////////////////////////////////// + ////////////////////////////////////////////////////////////////// + // END OF TESTS + ////////////////////////////////////////////////////////////////// + ////////////////////////////////////////////////////////////////// + +#if (PRINTF == 1) + printf("Test SUCCESS!\r\n"); +#endif + + cheshire_end(); + + // If we did not return before, the test passed + return RET_CODE_SUCCESS; +} diff --git a/cheshire/sw/src/tests/rvv_test_vstart_csrs.c b/cheshire/sw/src/tests/rvv_test_vstart_csrs.c new file mode 100644 index 000000000..2270119c6 --- /dev/null +++ b/cheshire/sw/src/tests/rvv_test_vstart_csrs.c @@ -0,0 +1,83 @@ +// Copyright 2023 ETH Zurich and University of Bologna. +// Licensed under the Apache License, Version 2.0, see LICENSE for details. 
+// SPDX-License-Identifier: Apache-2.0 +// +// Vincenzo Maisto + +#include "regs/cheshire.h" +#include "dif/clint.h" +#include "dif/uart.h" +#include "params.h" +#include "util.h" +#include "encoding.h" +#include "rvv_test.h" + +#include "cheshire_util.h" + +int main(void) { + cheshire_start(); + + // Vector configuration parameters and variables + uint64_t avl = RVV_TEST_AVL(64); + uint64_t vl; + vcsr_dump_t vcsr_state = {0}; + + RVV_TEST_CLEAN_EXCEPTION(); + + // Helper variables and arrays + // None for this test + + ////////////////////////////////////////////////////////////////// + ////////////////////////////////////////////////////////////////// + // START OF TESTS + ////////////////////////////////////////////////////////////////// + ////////////////////////////////////////////////////////////////// + + ////////////////////////////////////////////////////////////////// + // TEST: MSTATUS.VS read implementation + ////////////////////////////////////////////////////////////////// + // Enable RVV + enable_rvv(); + RVV_TEST_ASSERT_EXCEPTION ( 0 ); + + ////////////////////////////////////////////////////////////////// + // TEST: CSR read implementation + ////////////////////////////////////////////////////////////////// + vcsr_dump ( vcsr_state ); + RVV_TEST_ASSERT_EXCEPTION ( 0 ); + + ////////////////////////////////////////////////////////////////// + // TEST: CSR write implementation + ////////////////////////////////////////////////////////////////// + vl = reset_v_state ( avl ); + RVV_TEST_ASSERT_EXCEPTION ( 0 ); + + ////////////////////////////////////////////////////////////////// + // TEST: CSR write exception implementation + ////////////////////////////////////////////////////////////////// + asm volatile ("csrw vl, 0"); + RVV_TEST_ASSERT_EXCEPTION ( 1 ); + RVV_TEST_CLEAN_EXCEPTION (); + + asm volatile ("csrw vlenb, 0"); + RVV_TEST_ASSERT_EXCEPTION ( 1 ); + RVV_TEST_CLEAN_EXCEPTION (); + + asm volatile ("csrw vtype, 0"); + RVV_TEST_ASSERT_EXCEPTION ( 
 1 ); + RVV_TEST_CLEAN_EXCEPTION (); + + ////////////////////////////////////////////////////////////////// + ////////////////////////////////////////////////////////////////// + // END OF TESTS + ////////////////////////////////////////////////////////////////// + ////////////////////////////////////////////////////////////////// + +#if (PRINTF == 1) + printf("Test SUCCESS!\r\n"); +#endif + + cheshire_end(); + + return 0; +} diff --git a/cheshire/sw/src/tests/rvv_test_vstart_unit_stride_mmu_stub_page_fault_var_lat.c b/cheshire/sw/src/tests/rvv_test_vstart_unit_stride_mmu_stub_page_fault_var_lat.c new file mode 100644 index 000000000..421504730 --- /dev/null +++ b/cheshire/sw/src/tests/rvv_test_vstart_unit_stride_mmu_stub_page_fault_var_lat.c @@ -0,0 +1,15 @@ +// Copyright 2023 ETH Zurich and University of Bologna. +// Licensed under the Apache License, Version 2.0, see LICENSE for details. +// SPDX-License-Identifier: Apache-2.0 +// +// Matteo Perotti + +// Enable virtual memory +#define param_stub_virt_mem 1 + +// Variable req-rsp latency (from 1 to 10 cycles) +#define param_stub_req_rsp_lat_ctrl 1 +#define param_stub_req_rsp_lat 10 + +// Test body +#include "rvv_test_vstart_unit_stride_mmu_stub_page_fault.c.body" diff --git a/cheshire/sw/src/tests/rvv_test_vstart_unit_stride_mmu_stub_page_fault_var_lat_mmu_req_gen.c b/cheshire/sw/src/tests/rvv_test_vstart_unit_stride_mmu_stub_page_fault_var_lat_mmu_req_gen.c new file mode 100644 index 000000000..0e977f102 --- /dev/null +++ b/cheshire/sw/src/tests/rvv_test_vstart_unit_stride_mmu_stub_page_fault_var_lat_mmu_req_gen.c @@ -0,0 +1,20 @@ +// Copyright 2023 ETH Zurich and University of Bologna. +// Licensed under the Apache License, Version 2.0, see LICENSE for details. 
+// SPDX-License-Identifier: Apache-2.0 +// +// Matteo Perotti + +// Enable virtual memory +#define param_stub_virt_mem 1 + +// Variable req-rsp latency (from 1 to 10 cycles) +#define param_stub_req_rsp_lat_ctrl 1 +#define param_stub_req_rsp_lat 10 + +// MMU req gen enable +#define param_mmu_req_gen_en 1 +// Fixed rsp-req latency for MMU req gen +#define param_mmu_req_gen_lat 1 + +// Test body +#include "rvv_test_vstart_unit_stride_mmu_stub_page_fault.c.body" diff --git a/cheshire/sw/src/tests/rvv_test_vstart_unit_stride_mmu_stub_var_lat.c b/cheshire/sw/src/tests/rvv_test_vstart_unit_stride_mmu_stub_var_lat.c new file mode 100644 index 000000000..6525077dd --- /dev/null +++ b/cheshire/sw/src/tests/rvv_test_vstart_unit_stride_mmu_stub_var_lat.c @@ -0,0 +1,15 @@ +// Copyright 2023 ETH Zurich and University of Bologna. +// Licensed under the Apache License, Version 2.0, see LICENSE for details. +// SPDX-License-Identifier: Apache-2.0 +// +// Matteo Perotti + +// Enable virtual memory +#define param_stub_virt_mem 1 + +// Variable req-rsp latency (from 1 to 10 cycles) +#define param_stub_req_rsp_lat_ctrl 1 +#define param_stub_req_rsp_lat 10 + +// Test body +#include "rvv_test_vstart_unit_stride.c.body"