From 36d8a8d5846ff9bf02a46e1aba1d5c261fd3e821 Mon Sep 17 00:00:00 2001
From: Matteo Perotti
Date: Thu, 31 Oct 2024 11:55:57 +0100
Subject: [PATCH] [apps] Adapt mask logical tests to allowed tail behavior

---
 apps/riscv-tests/isa/rv64uv/vmand.c    | 14 +++++++-------
 apps/riscv-tests/isa/rv64uv/vmandnot.c | 14 +++++++-------
 apps/riscv-tests/isa/rv64uv/vmnand.c   | 14 +++++++-------
 apps/riscv-tests/isa/rv64uv/vmnor.c    | 14 +++++++-------
 apps/riscv-tests/isa/rv64uv/vmor.c     | 12 ++++++------
 apps/riscv-tests/isa/rv64uv/vmornot.c  | 12 ++++++------
 apps/riscv-tests/isa/rv64uv/vmxnor.c   | 14 +++++++-------
 apps/riscv-tests/isa/rv64uv/vmxor.c    | 12 ++++++------
 8 files changed, 53 insertions(+), 53 deletions(-)

diff --git a/apps/riscv-tests/isa/rv64uv/vmand.c b/apps/riscv-tests/isa/rv64uv/vmand.c
index 9e280b9f2..82332f4bf 100644
--- a/apps/riscv-tests/isa/rv64uv/vmand.c
+++ b/apps/riscv-tests/isa/rv64uv/vmand.c
@@ -12,7 +12,7 @@ void TEST_CASE1() {
   VLOAD_8(v2, 0xCD, 0xEF);
   VLOAD_8(v3, 0x84, 0x21);
   asm volatile("vmand.mm v1, v2, v3");
-  VSET(2, e8, m1);
+  VSET(16, e8, m1);
   VCMP_U8(1, v1, 0x84, 0x21);
 }

@@ -21,7 +21,7 @@ void TEST_CASE2() {
   VLOAD_8(v2, 0xCD, 0xEF);
   VLOAD_8(v3, 0xFF, 0xFF);
   asm volatile("vmand.mm v1, v2, v3");
-  VSET(2, e8, m1);
+  VSET(16, e8, m1);
   VCMP_U8(2, v1, 0xCD, 0xEF);
 }

@@ -30,7 +30,7 @@ void TEST_CASE3() {
   VLOAD_8(v2, 0xCD, 0xEF);
   VLOAD_8(v3, 0x00, 0x00);
   asm volatile("vmand.mm v1, v2, v3");
-  VSET(2, e8, m1);
+  VSET(16, e8, m1);
   VCMP_U8(3, v1, 0x00, 0x00);
 }

@@ -39,7 +39,7 @@ void TEST_CASE4() {
   VLOAD_8(v2, 0xCD, 0xEF);
   VLOAD_8(v3, 0x0F, 0xF0);
   asm volatile("vmand.mm v1, v2, v3");
-  VSET(2, e8, m1);
+  VSET(16, e8, m1);
   VCMP_U8(4, v1, 0x0D, 0xE0);
 }

@@ -48,10 +48,10 @@ void TEST_CASE5() {
   VLOAD_8(v1, 0xFF, 0xFF);
   VLOAD_8(v2, 0xCD, 0xEF);
   VLOAD_8(v3, 0x84, 0x21);
-  VSET(13, e8, m1);
+  VSET(16, e8, m1);
   asm volatile("vmand.mm v1, v2, v3");
-  VSET(2, e8, m1);
-  VCMP_U8(5, v1, 0x84, 0xE1);
+  VSET(16, e8, m1);
+  VCMP_U8(5, v1, 0x84, 0x21);
 }

 void TEST_CASE6() {
diff --git a/apps/riscv-tests/isa/rv64uv/vmandnot.c b/apps/riscv-tests/isa/rv64uv/vmandnot.c
index 4952d9760..5902eedd5 100644
--- a/apps/riscv-tests/isa/rv64uv/vmandnot.c
+++ b/apps/riscv-tests/isa/rv64uv/vmandnot.c
@@ -12,7 +12,7 @@ void TEST_CASE1() {
   VLOAD_8(v2, 0xCD, 0xEF);
   VLOAD_8(v3, 0x84, 0x21);
   asm volatile("vmandnot.mm v1, v2, v3");
-  VSET(2, e8, m1);
+  VSET(16, e8, m1);
   VCMP_U8(1, v1, 0x49, 0xCE);
 }

@@ -21,7 +21,7 @@ void TEST_CASE2() {
   VLOAD_8(v2, 0xCD, 0xEF);
   VLOAD_8(v3, 0xFF, 0xFF);
   asm volatile("vmandnot.mm v1, v2, v3");
-  VSET(2, e8, m1);
+  VSET(16, e8, m1);
   VCMP_U8(2, v1, 0x00, 0x00);
 }

@@ -30,7 +30,7 @@ void TEST_CASE3() {
   VLOAD_8(v2, 0xCD, 0xEF);
   VLOAD_8(v3, 0x00, 0x00);
   asm volatile("vmandnot.mm v1, v2, v3");
-  VSET(2, e8, m1);
+  VSET(16, e8, m1);
   VCMP_U8(3, v1, 0xCD, 0xEF);
 }

@@ -39,7 +39,7 @@ void TEST_CASE4() {
   VLOAD_8(v2, 0xCD, 0xEF);
   VLOAD_8(v3, 0x0F, 0xF0);
   asm volatile("vmandnot.mm v1, v2, v3");
-  VSET(2, e8, m1);
+  VSET(16, e8, m1);
   VCMP_U8(4, v1, 0xC0, 0x0F);
 }

@@ -48,10 +48,10 @@ void TEST_CASE5() {
   VLOAD_8(v1, 0xFF, 0xFF);
   VLOAD_8(v2, 0xCD, 0xEF);
   VLOAD_8(v3, 0x84, 0x21);
-  VSET(13, e8, m1);
+  VSET(16, e8, m1);
   asm volatile("vmandnot.mm v1, v2, v3");
-  VSET(2, e8, m1);
-  VCMP_U8(5, v1, 0x49, 0xEE);
+  VSET(16, e8, m1);
+  VCMP_U8(5, v1, 0x49, 0xCE);
 }

 int main(void) {
diff --git a/apps/riscv-tests/isa/rv64uv/vmnand.c b/apps/riscv-tests/isa/rv64uv/vmnand.c
index 61db49ecc..543fb28b2 100644
--- a/apps/riscv-tests/isa/rv64uv/vmnand.c
+++ b/apps/riscv-tests/isa/rv64uv/vmnand.c
@@ -12,7 +12,7 @@ void TEST_CASE1() {
   VLOAD_8(v2, 0xCD, 0xEF);
   VLOAD_8(v3, 0x84, 0x21);
   asm volatile("vmnand.mm v1, v2, v3");
-  VSET(2, e8, m1);
+  VSET(16, e8, m1);
   VCMP_U8(1, v1, 0x7B, 0xDE);
 }

@@ -21,7 +21,7 @@ void TEST_CASE2() {
   VLOAD_8(v2, 0xCD, 0xEF);
   VLOAD_8(v3, 0xFF, 0xFF);
   asm volatile("vmnand.mm v1, v2, v3");
-  VSET(2, e8, m1);
+  VSET(16, e8, m1);
   VCMP_U8(2, v1, 0x32, 0x10);
 }

@@ -30,7 +30,7 @@ void TEST_CASE3() {
   VLOAD_8(v2, 0xCD, 0xEF);
   VLOAD_8(v3, 0x00, 0x00);
   asm volatile("vmnand.mm v1, v2, v3");
-  VSET(2, e8, m1);
+  VSET(16, e8, m1);
   VCMP_U8(3, v1, 0xFF, 0xFF);
 }

@@ -39,7 +39,7 @@ void TEST_CASE4() {
   VLOAD_8(v2, 0xCD, 0xEF);
   VLOAD_8(v3, 0x0F, 0xF0);
   asm volatile("vmnand.mm v1, v2, v3");
-  VSET(2, e8, m1);
+  VSET(16, e8, m1);
   VCMP_U8(4, v1, 0xF2, 0x1F);
 }

@@ -48,10 +48,10 @@ void TEST_CASE5() {
   VLOAD_8(v1, 0xFF, 0xFF);
   VLOAD_8(v2, 0xCD, 0xEF);
   VLOAD_8(v3, 0x84, 0x21);
-  VSET(13, e8, m1);
+  VSET(16, e8, m1);
   asm volatile("vmnand.mm v1, v2, v3");
-  VSET(2, e8, m1);
-  VCMP_U8(5, v1, 0x7B, 0xFE);
+  VSET(16, e8, m1);
+  VCMP_U8(5, v1, 0x7B, 0xDE);
 }

 int main(void) {
diff --git a/apps/riscv-tests/isa/rv64uv/vmnor.c b/apps/riscv-tests/isa/rv64uv/vmnor.c
index 15322ac72..61a2f81f2 100644
--- a/apps/riscv-tests/isa/rv64uv/vmnor.c
+++ b/apps/riscv-tests/isa/rv64uv/vmnor.c
@@ -12,7 +12,7 @@ void TEST_CASE1() {
   VLOAD_8(v2, 0xCD, 0xEF);
   VLOAD_8(v3, 0x84, 0x21);
   asm volatile("vmnor.mm v1, v2, v3");
-  VSET(2, e8, m1);
+  VSET(16, e8, m1);
   VCMP_U8(1, v1, 0x32, 0x10);
 }

@@ -21,7 +21,7 @@ void TEST_CASE2() {
   VLOAD_8(v2, 0xCD, 0xEF);
   VLOAD_8(v3, 0xFF, 0xFF);
   asm volatile("vmnor.mm v1, v2, v3");
-  VSET(2, e8, m1);
+  VSET(16, e8, m1);
   VCMP_U8(2, v1, 0x00, 0x00);
 }

@@ -30,7 +30,7 @@ void TEST_CASE3() {
   VLOAD_8(v2, 0xCD, 0xEF);
   VLOAD_8(v3, 0x00, 0x00);
   asm volatile("vmnor.mm v1, v2, v3");
-  VSET(2, e8, m1);
+  VSET(16, e8, m1);
   VCMP_U8(3, v1, 0x32, 0x10);
 }

@@ -39,7 +39,7 @@ void TEST_CASE4() {
   VLOAD_8(v2, 0xCD, 0xEF);
   VLOAD_8(v3, 0x0F, 0xF0);
   asm volatile("vmnor.mm v1, v2, v3");
-  VSET(2, e8, m1);
+  VSET(16, e8, m1);
   VCMP_U8(4, v1, 0x30, 0x00);
 }

@@ -48,10 +48,10 @@ void TEST_CASE5() {
   VLOAD_8(v1, 0xFF, 0xFF);
   VLOAD_8(v2, 0xCD, 0xEF);
   VLOAD_8(v3, 0x84, 0x21);
-  VSET(13, e8, m1);
+  VSET(16, e8, m1);
   asm volatile("vmnor.mm v1, v2, v3");
-  VSET(2, e8, m1);
-  VCMP_U8(5, v1, 0x32, 0xF0);
+  VSET(16, e8, m1);
+  VCMP_U8(5, v1, 0x32, 0x10);
 }

 int main(void) {
diff --git a/apps/riscv-tests/isa/rv64uv/vmor.c b/apps/riscv-tests/isa/rv64uv/vmor.c
index 8a1693082..2ba46e404 100644
--- a/apps/riscv-tests/isa/rv64uv/vmor.c
+++ b/apps/riscv-tests/isa/rv64uv/vmor.c
@@ -12,7 +12,7 @@ void TEST_CASE1() {
   VLOAD_8(v2, 0xCD, 0xEF);
   VLOAD_8(v3, 0x84, 0x21);
   asm volatile("vmor.mm v1, v2, v3");
-  VSET(2, e8, m1);
+  VSET(16, e8, m1);
   VCMP_U8(1, v1, 0xCD, 0xEF);
 }

@@ -21,7 +21,7 @@ void TEST_CASE2() {
   VLOAD_8(v2, 0xCD, 0xEF);
   VLOAD_8(v3, 0xFF, 0xFF);
   asm volatile("vmor.mm v1, v2, v3");
-  VSET(2, e8, m1);
+  VSET(16, e8, m1);
   VCMP_U8(2, v1, 0xFF, 0xFF);
 }

@@ -30,7 +30,7 @@ void TEST_CASE3() {
   VLOAD_8(v2, 0xCD, 0xEF);
   VLOAD_8(v3, 0x00, 0x00);
   asm volatile("vmor.mm v1, v2, v3");
-  VSET(2, e8, m1);
+  VSET(16, e8, m1);
   VCMP_U8(3, v1, 0xCD, 0xEF);
 }

@@ -39,7 +39,7 @@ void TEST_CASE4() {
   VLOAD_8(v2, 0xCD, 0xEF);
   VLOAD_8(v3, 0x0F, 0xF0);
   asm volatile("vmor.mm v1, v2, v3");
-  VSET(2, e8, m1);
+  VSET(16, e8, m1);
   VCMP_U8(4, v1, 0xCF, 0xFF);
 }

@@ -48,9 +48,9 @@ void TEST_CASE5() {
   VLOAD_8(v1, 0xFF, 0xFF);
   VLOAD_8(v2, 0xCD, 0xEF);
   VLOAD_8(v3, 0x84, 0x21);
-  VSET(13, e8, m1);
+  VSET(16, e8, m1);
   asm volatile("vmor.mm v1, v2, v3");
-  VSET(2, e8, m1);
+  VSET(16, e8, m1);
   VCMP_U8(5, v1, 0xCD, 0xEF);
 }
diff --git a/apps/riscv-tests/isa/rv64uv/vmornot.c b/apps/riscv-tests/isa/rv64uv/vmornot.c
index 8e9497283..51ca14307 100644
--- a/apps/riscv-tests/isa/rv64uv/vmornot.c
+++ b/apps/riscv-tests/isa/rv64uv/vmornot.c
@@ -12,7 +12,7 @@ void TEST_CASE1() {
   VLOAD_8(v2, 0xCD, 0xEF);
   VLOAD_8(v3, 0x84, 0x21);
   asm volatile("vmornot.mm v1, v2, v3");
-  VSET(2, e8, m1);
+  VSET(16, e8, m1);
   VCMP_U8(1, v1, 0xFF, 0xFF);
 }

@@ -21,7 +21,7 @@ void TEST_CASE2() {
   VLOAD_8(v2, 0xCD, 0xEF);
   VLOAD_8(v3, 0xFF, 0xFF);
   asm volatile("vmornot.mm v1, v2, v3");
-  VSET(2, e8, m1);
+  VSET(16, e8, m1);
   VCMP_U8(2, v1, 0xCD, 0xEF);
 }

@@ -30,7 +30,7 @@ void TEST_CASE3() {
   VLOAD_8(v2, 0xCD, 0xEF);
   VLOAD_8(v3, 0x00, 0x00);
   asm volatile("vmornot.mm v1, v2, v3");
-  VSET(2, e8, m1);
+  VSET(16, e8, m1);
   VCMP_U8(3, v1, 0xFF, 0xFF);
 }

@@ -39,7 +39,7 @@ void TEST_CASE4() {
   VLOAD_8(v2, 0xCD, 0xEF);
   VLOAD_8(v3, 0x0F, 0xF0);
   asm volatile("vmornot.mm v1, v2, v3");
-  VSET(2, e8, m1);
+  VSET(16, e8, m1);
   VCMP_U8(4, v1, 0xFD, 0xEF);
 }

@@ -48,9 +48,9 @@ void TEST_CASE5() {
   VLOAD_8(v1, 0xFF, 0xFF);
   VLOAD_8(v2, 0xCD, 0xEF);
   VLOAD_8(v3, 0x84, 0x21);
-  VSET(13, e8, m1);
+  VSET(16, e8, m1);
   asm volatile("vmornot.mm v1, v2, v3");
-  VSET(2, e8, m1);
+  VSET(16, e8, m1);
   VCMP_U8(5, v1, 0xFF, 0xFF);
 }
diff --git a/apps/riscv-tests/isa/rv64uv/vmxnor.c b/apps/riscv-tests/isa/rv64uv/vmxnor.c
index fa0998a8d..74820c167 100644
--- a/apps/riscv-tests/isa/rv64uv/vmxnor.c
+++ b/apps/riscv-tests/isa/rv64uv/vmxnor.c
@@ -12,7 +12,7 @@ void TEST_CASE1() {
   VLOAD_8(v2, 0xCD, 0xEF);
   VLOAD_8(v3, 0x84, 0x21);
   asm volatile("vmxnor.mm v1, v2, v3");
-  VSET(2, e8, m1);
+  VSET(16, e8, m1);
   VCMP_U8(1, v1, 0xB6, 0x31);
 }

@@ -21,7 +21,7 @@ void TEST_CASE2() {
   VLOAD_8(v2, 0xCD, 0xEF);
   VLOAD_8(v3, 0xFF, 0xFF);
   asm volatile("vmxnor.mm v1, v2, v3");
-  VSET(2, e8, m1);
+  VSET(16, e8, m1);
   VCMP_U8(2, v1, 0xCD, 0xEF);
 }

@@ -30,7 +30,7 @@ void TEST_CASE3() {
   VLOAD_8(v2, 0xCD, 0xEF);
   VLOAD_8(v3, 0x00, 0x00);
   asm volatile("vmxnor.mm v1, v2, v3");
-  VSET(2, e8, m1);
+  VSET(16, e8, m1);
   VCMP_U8(3, v1, 0x32, 0x10);
 }

@@ -39,7 +39,7 @@ void TEST_CASE4() {
   VLOAD_8(v2, 0xCD, 0xEF);
   VLOAD_8(v3, 0x0F, 0xF0);
   asm volatile("vmxnor.mm v1, v2, v3");
-  VSET(2, e8, m1);
+  VSET(16, e8, m1);
   VCMP_U8(4, v1, 0x3D, 0xE0);
 }

@@ -48,10 +48,10 @@ void TEST_CASE5() {
   VLOAD_8(v1, 0xFF, 0xFF);
   VLOAD_8(v2, 0xCD, 0xEF);
   VLOAD_8(v3, 0x84, 0x21);
-  VSET(13, e8, m1);
+  VSET(16, e8, m1);
   asm volatile("vmxnor.mm v1, v2, v3");
-  VSET(2, e8, m1);
-  VCMP_U8(5, v1, 0xB6, 0xF1);
+  VSET(16, e8, m1);
+  VCMP_U8(5, v1, 0xB6, 0x31);
 }

 int main(void) {
diff --git a/apps/riscv-tests/isa/rv64uv/vmxor.c b/apps/riscv-tests/isa/rv64uv/vmxor.c
index 6a561d752..958ab1d10 100644
--- a/apps/riscv-tests/isa/rv64uv/vmxor.c
+++ b/apps/riscv-tests/isa/rv64uv/vmxor.c
@@ -12,7 +12,7 @@ void TEST_CASE1() {
   VLOAD_8(v2, 0xCD, 0xEF);
   VLOAD_8(v3, 0x84, 0x21);
   asm volatile("vmxor.mm v1, v2, v3");
-  VSET(2, e8, m1);
+  VSET(16, e8, m1);
   VCMP_U8(1, v1, 0x49, 0xCE);
 }

@@ -21,7 +21,7 @@ void TEST_CASE2() {
   VLOAD_8(v2, 0xCD, 0xEF);
   VLOAD_8(v3, 0xFF, 0xFF);
   asm volatile("vmxor.mm v1, v2, v3");
-  VSET(2, e8, m1);
+  VSET(16, e8, m1);
   VCMP_U8(2, v1, 0x32, 0x10);
 }

@@ -30,7 +30,7 @@ void TEST_CASE3() {
   VLOAD_8(v2, 0xCD, 0xEF);
   VLOAD_8(v3, 0x00, 0x00);
   asm volatile("vmxor.mm v1, v2, v3");
-  VSET(2, e8, m1);
+  VSET(16, e8, m1);
   VCMP_U8(3, v1, 0xCD, 0xEF);
 }

@@ -39,7 +39,7 @@ void TEST_CASE4() {
   VLOAD_8(v2, 0xCD, 0xEF);
   VLOAD_8(v3, 0x0F, 0xF0);
   asm volatile("vmxor.mm v1, v2, v3");
-  VSET(2, e8, m1);
+  VSET(16, e8, m1);
   VCMP_U8(4, v1, 0xC2, 0x1F);
 }

@@ -50,8 +50,8 @@ void TEST_CASE5() {
   VLOAD_8(v3, 0x84, 0x21);
   VSET(13, e8, m1);
   asm volatile("vmxor.mm v1, v2, v3");
-  VSET(2, e8, m1);
-  VCMP_U8(5, v1, 0x49, 0xEE);
+  VSET(16, e8, m1);
+  VCMP_U8(5, v1, 0x49, 0xCE);
 }

 int main(void) {
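
Note on the updated TEST_CASE5 expectations: RVV 1.0 treats the tail elements of a mask-producing instruction as always tail-agnostic, and additionally permits an implementation to write the tail of a mask result with the value the operation would have produced with vl=VLMAX. The old expected bytes assumed the destination tail kept its previous contents (tail-undisturbed), which over-constrains an implementation such as Ara that computes the whole mask register. Below is a minimal C sketch of the byte arithmetic, using vmand's TEST_CASE5 as the example; the helper name and bit masks are illustrative only and not part of the test suite.

#include <assert.h>
#include <stdint.h>

/* Second result byte of vmand.mm with vl = 13: mask bits 8..12 are active
 * (low bits 0x1F of the byte), bits 13..15 are tail bits (0xE0).
 * Hypothetical helper, for illustration only. */
static uint8_t vmand_byte1(uint8_t vs2, uint8_t vs1, uint8_t old_vd,
                           int tail_undisturbed) {
  uint8_t active = (vs2 & vs1) & 0x1F;
  uint8_t tail = tail_undisturbed
                     ? (old_vd & 0xE0)        /* old assumption: keep old bits */
                     : ((vs2 & vs1) & 0xE0);  /* allowed: compute as if vl=VLMAX */
  return active | tail;
}

int main(void) {
  /* Old test: v1 preloaded with 0xFF and tail assumed undisturbed -> 0xE1. */
  assert(vmand_byte1(0xEF, 0x21, 0xFF, 1) == 0xE1);
  /* Updated test: tail written as if vl were VLMAX -> 0x21. */
  assert(vmand_byte1(0xEF, 0x21, 0xFF, 0) == 0x21);
  return 0;
}

The same arithmetic reproduces every TEST_CASE5 pair above (e.g. vmxor's 0xEE -> 0xCE); for vmor and vmornot the two policies happen to produce identical bytes, which is why only their VSET lines changed.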