sw: Increase the length multiplier
suehtamacv committed Nov 14, 2023
1 parent 3833e19 commit f2f1939
Showing 69 changed files with 4,612 additions and 4,612 deletions.
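In short: these tests previously ran at a length multiplier (LMUL) of m2 and now run at m8. Since a vector register group must start at a register number that is a multiple of LMUL, the operands move from v2/v4/v6 to v8/v16/v24, while v0 keeps holding the mask. A minimal sketch of an LMUL=8 configuration, assuming RVV 1.0 inline assembly (set_vl_e8m8, vl, and avl are illustrative names, not from this commit):

#include <stddef.h>

// Request 8-bit elements with registers grouped in eights (m8, tail- and
// mask-agnostic); vsetvli returns in vl how many elements will be processed.
static size_t set_vl_e8m8(size_t avl) {
  size_t vl;
  asm volatile("vsetvli %0, %1, e8, m8, ta, ma" : "=r"(vl) : "r"(avl));
  return vl;
}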
sw/riscvTests/isa/rv64uv/vadd.c: 118 additions & 118 deletions
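For orientation, the file pairs each vadd operand form with a masked variant: TEST_CASE1/2 cover vector-vector (vadd.vv), TEST_CASE3/4 vector-immediate (vadd.vi), and TEST_CASE5/6 vector-scalar (vadd.vx), each repeated at e8/e16/e32 and, under ELEN == 64, at e64. The masked cases load 0xAA bytes into v0, so only odd-indexed elements are written into a destination that VCLEAR zeroed beforehand. A minimal C model of that expectation (a sketch, not the framework's code; masked_vadd_model is a name invented here):

#include <stdint.h>

// Sketch of the masked e8 expectation: mask bit i gates element i, and
// 0xAA per mask byte sets bits 1, 3, 5, 7, so odd elements are active.
void masked_vadd_model(uint8_t *vd, const uint8_t *vs2, const uint8_t *vs1) {
  const uint16_t mask = 0xAAAA; // two 0xAA mask bytes for 16 elements
  for (int i = 0; i < 16; ++i)
    vd[i] = ((mask >> i) & 1) ? (uint8_t)(vs2[i] + vs1[i])
                              : 0; // inactive lanes keep the cleared value
}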
@@ -8,182 +8,182 @@
 #include "vector_macros.h"
 
 void TEST_CASE1(void) {
-  VSET(16, e8, m2);
-  VLOAD_8(v2, 1, 2, 3, 4, 5, 6, 7, 8, 1, 2, 3, 4, 5, 6, 7, 8);
-  VLOAD_8(v4, 1, 2, 3, 4, 5, 6, 7, 8, 1, 2, 3, 4, 5, 6, 7, 8);
-  asm volatile("vadd.vv v6, v2, v4");
-  VCMP_U8(1, v6, 2, 4, 6, 8, 10, 12, 14, 16, 2, 4, 6, 8, 10, 12, 14, 16);
-
-  VSET(16, e16, m2);
-  VLOAD_16(v2, 1, 2, 3, 4, 5, 6, 7, 8, 1, 2, 3, 4, 5, 6, 7, 8);
-  VLOAD_16(v4, 1, 2, 3, 4, 5, 6, 7, 8, 1, 2, 3, 4, 5, 6, 7, 8);
-  asm volatile("vadd.vv v6, v2, v4");
-  VCMP_U16(2, v6, 2, 4, 6, 8, 10, 12, 14, 16, 2, 4, 6, 8, 10, 12, 14, 16);
-
-  VSET(16, e32, m2);
-  VLOAD_32(v2, 1, 2, 3, 4, 5, 6, 7, 8, 1, 2, 3, 4, 5, 6, 7, 8);
-  VLOAD_32(v4, 1, 2, 3, 4, 5, 6, 7, 8, 1, 2, 3, 4, 5, 6, 7, 8);
-  asm volatile("vadd.vv v6, v2, v4");
-  VCMP_U32(3, v6, 2, 4, 6, 8, 10, 12, 14, 16, 2, 4, 6, 8, 10, 12, 14, 16);
+  VSET(16, e8, m8);
+  VLOAD_8(v8, 1, 2, 3, 4, 5, 6, 7, 8, 1, 2, 3, 4, 5, 6, 7, 8);
+  VLOAD_8(v16, 1, 2, 3, 4, 5, 6, 7, 8, 1, 2, 3, 4, 5, 6, 7, 8);
+  asm volatile("vadd.vv v24, v8, v16");
+  VCMP_U8(1, v24, 2, 4, 6, 8, 10, 12, 14, 16, 2, 4, 6, 8, 10, 12, 14, 16);
+
+  VSET(16, e16, m8);
+  VLOAD_16(v8, 1, 2, 3, 4, 5, 6, 7, 8, 1, 2, 3, 4, 5, 6, 7, 8);
+  VLOAD_16(v16, 1, 2, 3, 4, 5, 6, 7, 8, 1, 2, 3, 4, 5, 6, 7, 8);
+  asm volatile("vadd.vv v24, v8, v16");
+  VCMP_U16(2, v24, 2, 4, 6, 8, 10, 12, 14, 16, 2, 4, 6, 8, 10, 12, 14, 16);
+
+  VSET(16, e32, m8);
+  VLOAD_32(v8, 1, 2, 3, 4, 5, 6, 7, 8, 1, 2, 3, 4, 5, 6, 7, 8);
+  VLOAD_32(v16, 1, 2, 3, 4, 5, 6, 7, 8, 1, 2, 3, 4, 5, 6, 7, 8);
+  asm volatile("vadd.vv v24, v8, v16");
+  VCMP_U32(3, v24, 2, 4, 6, 8, 10, 12, 14, 16, 2, 4, 6, 8, 10, 12, 14, 16);
 
 #if ELEN == 64
-  VSET(16, e64, m2);
-  VLOAD_64(v2, 1, 2, 3, 4, 5, 6, 7, 8, 1, 2, 3, 4, 5, 6, 7, 8);
-  VLOAD_64(v4, 1, 2, 3, 4, 5, 6, 7, 8, 1, 2, 3, 4, 5, 6, 7, 8);
-  asm volatile("vadd.vv v6, v2, v4");
-  VCMP_U64(4, v6, 2, 4, 6, 8, 10, 12, 14, 16, 2, 4, 6, 8, 10, 12, 14, 16);
+  VSET(16, e64, m8);
+  VLOAD_64(v8, 1, 2, 3, 4, 5, 6, 7, 8, 1, 2, 3, 4, 5, 6, 7, 8);
+  VLOAD_64(v16, 1, 2, 3, 4, 5, 6, 7, 8, 1, 2, 3, 4, 5, 6, 7, 8);
+  asm volatile("vadd.vv v24, v8, v16");
+  VCMP_U64(4, v24, 2, 4, 6, 8, 10, 12, 14, 16, 2, 4, 6, 8, 10, 12, 14, 16);
 #endif
 }

 void TEST_CASE2(void) {
-  VSET(16, e8, m2);
-  VLOAD_8(v2, 1, 2, 3, 4, 5, 6, 7, 8, 1, 2, 3, 4, 5, 6, 7, 8);
-  VLOAD_8(v4, 1, 2, 3, 4, 5, 6, 7, 8, 1, 2, 3, 4, 5, 6, 7, 8);
+  VSET(16, e8, m8);
+  VLOAD_8(v8, 1, 2, 3, 4, 5, 6, 7, 8, 1, 2, 3, 4, 5, 6, 7, 8);
+  VLOAD_8(v16, 1, 2, 3, 4, 5, 6, 7, 8, 1, 2, 3, 4, 5, 6, 7, 8);
   VLOAD_8(v0, 0xAA, 0xAA);
-  VCLEAR(v6);
-  asm volatile("vadd.vv v6, v2, v4, v0.t");
-  VCMP_U8(5, v6, 0, 4, 0, 8, 0, 12, 0, 16, 0, 4, 0, 8, 0, 12, 0, 16);
+  VCLEAR(v24);
+  asm volatile("vadd.vv v24, v8, v16, v0.t");
+  VCMP_U8(5, v24, 0, 4, 0, 8, 0, 12, 0, 16, 0, 4, 0, 8, 0, 12, 0, 16);
 
-  VSET(16, e16, m2);
-  VLOAD_16(v2, 1, 2, 3, 4, 5, 6, 7, 8, 1, 2, 3, 4, 5, 6, 7, 8);
-  VLOAD_16(v4, 1, 2, 3, 4, 5, 6, 7, 8, 1, 2, 3, 4, 5, 6, 7, 8);
+  VSET(16, e16, m8);
+  VLOAD_16(v8, 1, 2, 3, 4, 5, 6, 7, 8, 1, 2, 3, 4, 5, 6, 7, 8);
+  VLOAD_16(v16, 1, 2, 3, 4, 5, 6, 7, 8, 1, 2, 3, 4, 5, 6, 7, 8);
   VLOAD_8(v0, 0xAA, 0xAA);
-  VCLEAR(v6);
-  asm volatile("vadd.vv v6, v2, v4, v0.t");
-  VCMP_U16(6, v6, 0, 4, 0, 8, 0, 12, 0, 16, 0, 4, 0, 8, 0, 12, 0, 16);
+  VCLEAR(v24);
+  asm volatile("vadd.vv v24, v8, v16, v0.t");
+  VCMP_U16(6, v24, 0, 4, 0, 8, 0, 12, 0, 16, 0, 4, 0, 8, 0, 12, 0, 16);
 
-  VSET(16, e32, m2);
-  VLOAD_32(v2, 1, 2, 3, 4, 5, 6, 7, 8, 1, 2, 3, 4, 5, 6, 7, 8);
-  VLOAD_32(v4, 1, 2, 3, 4, 5, 6, 7, 8, 1, 2, 3, 4, 5, 6, 7, 8);
+  VSET(16, e32, m8);
+  VLOAD_32(v8, 1, 2, 3, 4, 5, 6, 7, 8, 1, 2, 3, 4, 5, 6, 7, 8);
+  VLOAD_32(v16, 1, 2, 3, 4, 5, 6, 7, 8, 1, 2, 3, 4, 5, 6, 7, 8);
   VLOAD_8(v0, 0xAA, 0xAA);
-  VCLEAR(v6);
-  asm volatile("vadd.vv v6, v2, v4, v0.t");
-  VCMP_U32(7, v6, 0, 4, 0, 8, 0, 12, 0, 16, 0, 4, 0, 8, 0, 12, 0, 16);
+  VCLEAR(v24);
+  asm volatile("vadd.vv v24, v8, v16, v0.t");
+  VCMP_U32(7, v24, 0, 4, 0, 8, 0, 12, 0, 16, 0, 4, 0, 8, 0, 12, 0, 16);
 
 #if ELEN == 64
-  VSET(16, e64, m2);
-  VLOAD_64(v2, 1, 2, 3, 4, 5, 6, 7, 8, 1, 2, 3, 4, 5, 6, 7, 8);
-  VLOAD_64(v4, 1, 2, 3, 4, 5, 6, 7, 8, 1, 2, 3, 4, 5, 6, 7, 8);
+  VSET(16, e64, m8);
+  VLOAD_64(v8, 1, 2, 3, 4, 5, 6, 7, 8, 1, 2, 3, 4, 5, 6, 7, 8);
+  VLOAD_64(v16, 1, 2, 3, 4, 5, 6, 7, 8, 1, 2, 3, 4, 5, 6, 7, 8);
   VLOAD_8(v0, 0xAA, 0xAA);
-  VCLEAR(v6);
-  asm volatile("vadd.vv v6, v2, v4, v0.t");
-  VCMP_U64(8, v6, 0, 4, 0, 8, 0, 12, 0, 16, 0, 4, 0, 8, 0, 12, 0, 16);
+  VCLEAR(v24);
+  asm volatile("vadd.vv v24, v8, v16, v0.t");
+  VCMP_U64(8, v24, 0, 4, 0, 8, 0, 12, 0, 16, 0, 4, 0, 8, 0, 12, 0, 16);
 #endif
 }

 void TEST_CASE3(void) {
-  VSET(16, e8, m2);
-  VLOAD_8(v2, 1, 2, 3, 4, 5, 6, 7, 8, 1, 2, 3, 4, 5, 6, 7, 8);
-  asm volatile("vadd.vi v6, v2, 5");
-  VCMP_U8(9, v6, 6, 7, 8, 9, 10, 11, 12, 13, 6, 7, 8, 9, 10, 11, 12, 13);
+  VSET(16, e8, m8);
+  VLOAD_8(v8, 1, 2, 3, 4, 5, 6, 7, 8, 1, 2, 3, 4, 5, 6, 7, 8);
+  asm volatile("vadd.vi v24, v8, 5");
+  VCMP_U8(9, v24, 6, 7, 8, 9, 10, 11, 12, 13, 6, 7, 8, 9, 10, 11, 12, 13);
 
-  VSET(16, e16, m2);
-  VLOAD_16(v2, 1, 2, 3, 4, 5, 6, 7, 8, 1, 2, 3, 4, 5, 6, 7, 8);
-  asm volatile("vadd.vi v6, v2, 5");
-  VCMP_U16(10, v6, 6, 7, 8, 9, 10, 11, 12, 13, 6, 7, 8, 9, 10, 11, 12, 13);
+  VSET(16, e16, m8);
+  VLOAD_16(v8, 1, 2, 3, 4, 5, 6, 7, 8, 1, 2, 3, 4, 5, 6, 7, 8);
+  asm volatile("vadd.vi v24, v8, 5");
+  VCMP_U16(10, v24, 6, 7, 8, 9, 10, 11, 12, 13, 6, 7, 8, 9, 10, 11, 12, 13);
 
-  VSET(16, e32, m2);
-  VLOAD_32(v2, 1, 2, 3, 4, 5, 6, 7, 8, 1, 2, 3, 4, 5, 6, 7, 8);
-  asm volatile("vadd.vi v6, v2, 5");
-  VCMP_U32(11, v6, 6, 7, 8, 9, 10, 11, 12, 13, 6, 7, 8, 9, 10, 11, 12, 13);
+  VSET(16, e32, m8);
+  VLOAD_32(v8, 1, 2, 3, 4, 5, 6, 7, 8, 1, 2, 3, 4, 5, 6, 7, 8);
+  asm volatile("vadd.vi v24, v8, 5");
+  VCMP_U32(11, v24, 6, 7, 8, 9, 10, 11, 12, 13, 6, 7, 8, 9, 10, 11, 12, 13);
 
 #if ELEN == 64
-  VSET(16, e64, m2);
-  VLOAD_64(v2, 1, 2, 3, 4, 5, 6, 7, 8, 1, 2, 3, 4, 5, 6, 7, 8);
-  asm volatile("vadd.vi v6, v2, 5");
-  VCMP_U64(12, v6, 6, 7, 8, 9, 10, 11, 12, 13, 6, 7, 8, 9, 10, 11, 12, 13);
+  VSET(16, e64, m8);
+  VLOAD_64(v8, 1, 2, 3, 4, 5, 6, 7, 8, 1, 2, 3, 4, 5, 6, 7, 8);
+  asm volatile("vadd.vi v24, v8, 5");
+  VCMP_U64(12, v24, 6, 7, 8, 9, 10, 11, 12, 13, 6, 7, 8, 9, 10, 11, 12, 13);
 #endif
 }

 void TEST_CASE4(void) {
-  VSET(16, e8, m2);
-  VLOAD_8(v2, 1, 2, 3, 4, 5, 6, 7, 8, 1, 2, 3, 4, 5, 6, 7, 8);
+  VSET(16, e8, m8);
+  VLOAD_8(v8, 1, 2, 3, 4, 5, 6, 7, 8, 1, 2, 3, 4, 5, 6, 7, 8);
   VLOAD_8(v0, 0xAA, 0xAA);
-  VCLEAR(v6);
-  asm volatile("vadd.vi v6, v2, 5, v0.t");
-  VCMP_U8(13, v6, 0, 7, 0, 9, 0, 11, 0, 13, 0, 7, 0, 9, 0, 11, 0, 13);
+  VCLEAR(v24);
+  asm volatile("vadd.vi v24, v8, 5, v0.t");
+  VCMP_U8(13, v24, 0, 7, 0, 9, 0, 11, 0, 13, 0, 7, 0, 9, 0, 11, 0, 13);
 
-  VSET(16, e16, m2);
-  VLOAD_16(v2, 1, 2, 3, 4, 5, 6, 7, 8, 1, 2, 3, 4, 5, 6, 7, 8);
+  VSET(16, e16, m8);
+  VLOAD_16(v8, 1, 2, 3, 4, 5, 6, 7, 8, 1, 2, 3, 4, 5, 6, 7, 8);
   VLOAD_8(v0, 0xAA, 0xAA);
-  VCLEAR(v6);
-  asm volatile("vadd.vi v6, v2, 5, v0.t");
-  VCMP_U16(14, v6, 0, 7, 0, 9, 0, 11, 0, 13, 0, 7, 0, 9, 0, 11, 0, 13);
+  VCLEAR(v24);
+  asm volatile("vadd.vi v24, v8, 5, v0.t");
+  VCMP_U16(14, v24, 0, 7, 0, 9, 0, 11, 0, 13, 0, 7, 0, 9, 0, 11, 0, 13);
 
-  VSET(16, e32, m2);
-  VLOAD_32(v2, 1, 2, 3, 4, 5, 6, 7, 8, 1, 2, 3, 4, 5, 6, 7, 8);
+  VSET(16, e32, m8);
+  VLOAD_32(v8, 1, 2, 3, 4, 5, 6, 7, 8, 1, 2, 3, 4, 5, 6, 7, 8);
   VLOAD_8(v0, 0xAA, 0xAA);
-  VCLEAR(v6);
-  asm volatile("vadd.vi v6, v2, 5, v0.t");
-  VCMP_U32(15, v6, 0, 7, 0, 9, 0, 11, 0, 13, 0, 7, 0, 9, 0, 11, 0, 13);
+  VCLEAR(v24);
+  asm volatile("vadd.vi v24, v8, 5, v0.t");
+  VCMP_U32(15, v24, 0, 7, 0, 9, 0, 11, 0, 13, 0, 7, 0, 9, 0, 11, 0, 13);
 
 #if ELEN == 64
-  VSET(16, e64, m2);
-  VLOAD_64(v2, 1, 2, 3, 4, 5, 6, 7, 8, 1, 2, 3, 4, 5, 6, 7, 8);
+  VSET(16, e64, m8);
+  VLOAD_64(v8, 1, 2, 3, 4, 5, 6, 7, 8, 1, 2, 3, 4, 5, 6, 7, 8);
   VLOAD_8(v0, 0xAA, 0xAA);
-  VCLEAR(v6);
-  asm volatile("vadd.vi v6, v2, 5, v0.t");
-  VCMP_U64(16, v6, 0, 7, 0, 9, 0, 11, 0, 13, 0, 7, 0, 9, 0, 11, 0, 13);
+  VCLEAR(v24);
+  asm volatile("vadd.vi v24, v8, 5, v0.t");
+  VCMP_U64(16, v24, 0, 7, 0, 9, 0, 11, 0, 13, 0, 7, 0, 9, 0, 11, 0, 13);
 #endif
 }

 void TEST_CASE5(void) {
   const uint32_t scalar = 5;
 
-  VSET(16, e8, m2);
-  VLOAD_8(v2, 1, -2, 3, -4, 5, -6, 7, -8, 9, -10, 11, -12, 13, -14, 15, -16);
-  asm volatile("vadd.vx v6, v2, %[A]" ::[A] "r"(scalar));
-  VCMP_U8(17, v6, 6, 3, 8, 1, 10, -1, 12, -3, 14, -5, 16, -7, 18, -9, 20, -11);
+  VSET(16, e8, m8);
+  VLOAD_8(v8, 1, -2, 3, -4, 5, -6, 7, -8, 9, -10, 11, -12, 13, -14, 15, -16);
+  asm volatile("vadd.vx v24, v8, %[A]" ::[A] "r"(scalar));
+  VCMP_U8(17, v24, 6, 3, 8, 1, 10, -1, 12, -3, 14, -5, 16, -7, 18, -9, 20, -11);
 
-  VSET(16, e16, m2);
-  VLOAD_16(v2, 1, -2, 3, -4, 5, -6, 7, -8, 9, -10, 11, -12, 13, -14, 15, -16);
-  asm volatile("vadd.vx v6, v2, %[A]" ::[A] "r"(scalar));
-  VCMP_U16(18, v6, 6, 3, 8, 1, 10, -1, 12, -3, 14, -5, 16, -7, 18, -9, 20, -11);
+  VSET(16, e16, m8);
+  VLOAD_16(v8, 1, -2, 3, -4, 5, -6, 7, -8, 9, -10, 11, -12, 13, -14, 15, -16);
+  asm volatile("vadd.vx v24, v8, %[A]" ::[A] "r"(scalar));
+  VCMP_U16(18, v24, 6, 3, 8, 1, 10, -1, 12, -3, 14, -5, 16, -7, 18, -9, 20, -11);
 
-  VSET(16, e32, m2);
-  VLOAD_32(v2, 1, -2, 3, -4, 5, -6, 7, -8, 9, -10, 11, -12, 13, -14, 15, -16);
-  asm volatile("vadd.vx v6, v2, %[A]" ::[A] "r"(scalar));
-  VCMP_U32(19, v6, 6, 3, 8, 1, 10, -1, 12, -3, 14, -5, 16, -7, 18, -9, 20, -11);
+  VSET(16, e32, m8);
+  VLOAD_32(v8, 1, -2, 3, -4, 5, -6, 7, -8, 9, -10, 11, -12, 13, -14, 15, -16);
+  asm volatile("vadd.vx v24, v8, %[A]" ::[A] "r"(scalar));
+  VCMP_U32(19, v24, 6, 3, 8, 1, 10, -1, 12, -3, 14, -5, 16, -7, 18, -9, 20, -11);
 
 #if ELEN == 64
-  VSET(16, e64, m2);
-  VLOAD_64(v2, 1, -2, 3, -4, 5, -6, 7, -8, 9, -10, 11, -12, 13, -14, 15, -16);
-  asm volatile("vadd.vx v6, v2, %[A]" ::[A] "r"(scalar));
-  VCMP_U64(20, v6, 6, 3, 8, 1, 10, -1, 12, -3, 14, -5, 16, -7, 18, -9, 20, -11);
+  VSET(16, e64, m8);
+  VLOAD_64(v8, 1, -2, 3, -4, 5, -6, 7, -8, 9, -10, 11, -12, 13, -14, 15, -16);
+  asm volatile("vadd.vx v24, v8, %[A]" ::[A] "r"(scalar));
+  VCMP_U64(20, v24, 6, 3, 8, 1, 10, -1, 12, -3, 14, -5, 16, -7, 18, -9, 20, -11);
 #endif
 }

 void TEST_CASE6(void) {
   const uint32_t scalar = 5;
 
-  VSET(16, e8, m2);
-  VLOAD_8(v2, 1, 2, 3, 4, 5, 6, 7, 8, 1, 2, 3, 4, 5, 6, 7, 8);
+  VSET(16, e8, m8);
+  VLOAD_8(v8, 1, 2, 3, 4, 5, 6, 7, 8, 1, 2, 3, 4, 5, 6, 7, 8);
   VLOAD_8(v0, 0xAA, 0xAA);
-  VCLEAR(v6);
-  asm volatile("vadd.vx v6, v2, %[A], v0.t" ::[A] "r"(scalar));
-  VCMP_U8(21, v6, 0, 7, 0, 9, 0, 11, 0, 13, 0, 7, 0, 9, 0, 11, 0, 13);
+  VCLEAR(v24);
+  asm volatile("vadd.vx v24, v8, %[A], v0.t" ::[A] "r"(scalar));
+  VCMP_U8(21, v24, 0, 7, 0, 9, 0, 11, 0, 13, 0, 7, 0, 9, 0, 11, 0, 13);
 
-  VSET(16, e16, m2);
-  VLOAD_16(v2, 1, 2, 3, 4, 5, 6, 7, 8, 1, 2, 3, 4, 5, 6, 7, 8);
+  VSET(16, e16, m8);
+  VLOAD_16(v8, 1, 2, 3, 4, 5, 6, 7, 8, 1, 2, 3, 4, 5, 6, 7, 8);
   VLOAD_8(v0, 0xAA, 0xAA);
-  VCLEAR(v6);
-  asm volatile("vadd.vx v6, v2, %[A], v0.t" ::[A] "r"(scalar));
-  VCMP_U16(22, v6, 0, 7, 0, 9, 0, 11, 0, 13, 0, 7, 0, 9, 0, 11, 0, 13);
+  VCLEAR(v24);
+  asm volatile("vadd.vx v24, v8, %[A], v0.t" ::[A] "r"(scalar));
+  VCMP_U16(22, v24, 0, 7, 0, 9, 0, 11, 0, 13, 0, 7, 0, 9, 0, 11, 0, 13);
 
-  VSET(16, e32, m2);
-  VLOAD_32(v2, 1, 2, 3, 4, 5, 6, 7, 8, 1, 2, 3, 4, 5, 6, 7, 8);
+  VSET(16, e32, m8);
+  VLOAD_32(v8, 1, 2, 3, 4, 5, 6, 7, 8, 1, 2, 3, 4, 5, 6, 7, 8);
   VLOAD_8(v0, 0xAA, 0xAA);
-  VCLEAR(v6);
-  asm volatile("vadd.vx v6, v2, %[A], v0.t" ::[A] "r"(scalar));
-  VCMP_U32(23, v6, 0, 7, 0, 9, 0, 11, 0, 13, 0, 7, 0, 9, 0, 11, 0, 13);
+  VCLEAR(v24);
+  asm volatile("vadd.vx v24, v8, %[A], v0.t" ::[A] "r"(scalar));
+  VCMP_U32(23, v24, 0, 7, 0, 9, 0, 11, 0, 13, 0, 7, 0, 9, 0, 11, 0, 13);
 
 #if ELEN == 64
-  VSET(16, e64, m2);
-  VLOAD_64(v2, 1, 2, 3, 4, 5, 6, 7, 8, 1, 2, 3, 4, 5, 6, 7, 8);
+  VSET(16, e64, m8);
+  VLOAD_64(v8, 1, 2, 3, 4, 5, 6, 7, 8, 1, 2, 3, 4, 5, 6, 7, 8);
   VLOAD_8(v0, 0xAA, 0xAA);
-  VCLEAR(v6);
-  asm volatile("vadd.vx v6, v2, %[A], v0.t" ::[A] "r"(scalar));
-  VCMP_U64(24, v6, 0, 7, 0, 9, 0, 11, 0, 13, 0, 7, 0, 9, 0, 11, 0, 13);
+  VCLEAR(v24);
+  asm volatile("vadd.vx v24, v8, %[A], v0.t" ::[A] "r"(scalar));
+  VCMP_U64(24, v24, 0, 7, 0, 9, 0, 11, 0, 13, 0, 7, 0, 9, 0, 11, 0, 13);
 #endif
 }
