diff --git a/auto-generated/vector-crypto/api-testing/vaesdf.c b/auto-generated/vector-crypto/api-testing/vaesdf.c
new file mode 100644
index 000000000..e5b912a42
--- /dev/null
+++ b/auto-generated/vector-crypto/api-testing/vaesdf.c
@@ -0,0 +1,78 @@
+#include <riscv_vector.h>
+#include <stdint.h>
+
+vuint32mf2_t test_vaesdf_vv_u32mf2(vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) {
+  return __riscv_vaesdf_vv_u32mf2(vd, vs2, vl);
+}
+
+vuint32mf2_t test_vaesdf_vs_u32mf2_u32mf2(vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) {
+  return __riscv_vaesdf_vs_u32mf2_u32mf2(vd, vs2, vl);
+}
+
+vuint32m1_t test_vaesdf_vs_u32mf2_u32m1(vuint32m1_t vd, vuint32mf2_t vs2, size_t vl) {
+  return __riscv_vaesdf_vs_u32mf2_u32m1(vd, vs2, vl);
+}
+
+vuint32m2_t test_vaesdf_vs_u32mf2_u32m2(vuint32m2_t vd, vuint32mf2_t vs2, size_t vl) {
+  return __riscv_vaesdf_vs_u32mf2_u32m2(vd, vs2, vl);
+}
+
+vuint32m4_t test_vaesdf_vs_u32mf2_u32m4(vuint32m4_t vd, vuint32mf2_t vs2, size_t vl) {
+  return __riscv_vaesdf_vs_u32mf2_u32m4(vd, vs2, vl);
+}
+
+vuint32m8_t test_vaesdf_vs_u32mf2_u32m8(vuint32m8_t vd, vuint32mf2_t vs2, size_t vl) {
+  return __riscv_vaesdf_vs_u32mf2_u32m8(vd, vs2, vl);
+}
+
+vuint32m1_t test_vaesdf_vv_u32m1(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) {
+  return __riscv_vaesdf_vv_u32m1(vd, vs2, vl);
+}
+
+vuint32m1_t test_vaesdf_vs_u32m1_u32m1(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) {
+  return __riscv_vaesdf_vs_u32m1_u32m1(vd, vs2, vl);
+}
+
+vuint32m2_t test_vaesdf_vs_u32m1_u32m2(vuint32m2_t vd, vuint32m1_t vs2, size_t vl) {
+  return __riscv_vaesdf_vs_u32m1_u32m2(vd, vs2, vl);
+}
+
+vuint32m4_t test_vaesdf_vs_u32m1_u32m4(vuint32m4_t vd, vuint32m1_t vs2, size_t vl) {
+  return __riscv_vaesdf_vs_u32m1_u32m4(vd, vs2, vl);
+}
+
+vuint32m8_t test_vaesdf_vs_u32m1_u32m8(vuint32m8_t vd, vuint32m1_t vs2, size_t vl) {
+  return __riscv_vaesdf_vs_u32m1_u32m8(vd, vs2, vl);
+}
+
+vuint32m2_t test_vaesdf_vv_u32m2(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) {
+  return __riscv_vaesdf_vv_u32m2(vd, vs2, vl);
+}
+
+vuint32m2_t test_vaesdf_vs_u32m2_u32m2(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) {
+  return __riscv_vaesdf_vs_u32m2_u32m2(vd, vs2, vl);
+}
+
+vuint32m4_t test_vaesdf_vs_u32m2_u32m4(vuint32m4_t vd, vuint32m2_t vs2, size_t vl) {
+  return __riscv_vaesdf_vs_u32m2_u32m4(vd, vs2, vl);
+}
+
+vuint32m8_t test_vaesdf_vs_u32m2_u32m8(vuint32m8_t vd, vuint32m2_t vs2, size_t vl) {
+  return __riscv_vaesdf_vs_u32m2_u32m8(vd, vs2, vl);
+}
+
+vuint32m4_t test_vaesdf_vv_u32m4(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) {
+  return __riscv_vaesdf_vv_u32m4(vd, vs2, vl);
+}
+
+vuint32m4_t test_vaesdf_vs_u32m4_u32m4(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) {
+  return __riscv_vaesdf_vs_u32m4_u32m4(vd, vs2, vl);
+}
+
+vuint32m8_t test_vaesdf_vs_u32m4_u32m8(vuint32m8_t vd, vuint32m4_t vs2, size_t vl) {
+  return __riscv_vaesdf_vs_u32m4_u32m8(vd, vs2, vl);
+}
+
+vuint32m8_t test_vaesdf_vv_u32m8(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) {
+  return __riscv_vaesdf_vv_u32m8(vd, vs2, vl);
+}
diff --git a/auto-generated/vector-crypto/api-testing/vaesdm.c b/auto-generated/vector-crypto/api-testing/vaesdm.c
new file mode 100644
index 000000000..903beeddf
--- /dev/null
+++ b/auto-generated/vector-crypto/api-testing/vaesdm.c
@@ -0,0 +1,78 @@
+#include <riscv_vector.h>
+#include <stdint.h>
+
+vuint32mf2_t test_vaesdm_vv_u32mf2(vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) {
+  return __riscv_vaesdm_vv_u32mf2(vd, vs2, vl);
+}
+
+vuint32mf2_t test_vaesdm_vs_u32mf2_u32mf2(vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) {
+  return __riscv_vaesdm_vs_u32mf2_u32mf2(vd, vs2, vl);
+}
+
+vuint32m1_t test_vaesdm_vs_u32mf2_u32m1(vuint32m1_t vd, vuint32mf2_t vs2, size_t vl) {
+  return __riscv_vaesdm_vs_u32mf2_u32m1(vd, vs2, vl);
+}
+
+vuint32m2_t test_vaesdm_vs_u32mf2_u32m2(vuint32m2_t vd, vuint32mf2_t vs2, size_t vl) {
+  return __riscv_vaesdm_vs_u32mf2_u32m2(vd, vs2, vl);
+}
+
+vuint32m4_t test_vaesdm_vs_u32mf2_u32m4(vuint32m4_t vd, vuint32mf2_t vs2, size_t vl) {
+  return __riscv_vaesdm_vs_u32mf2_u32m4(vd, vs2, vl);
+}
+
+vuint32m8_t test_vaesdm_vs_u32mf2_u32m8(vuint32m8_t vd, vuint32mf2_t vs2, size_t vl) {
+  return __riscv_vaesdm_vs_u32mf2_u32m8(vd, vs2, vl);
+}
+
+vuint32m1_t test_vaesdm_vv_u32m1(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) {
+  return __riscv_vaesdm_vv_u32m1(vd, vs2, vl);
+}
+
+vuint32m1_t test_vaesdm_vs_u32m1_u32m1(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) {
+  return __riscv_vaesdm_vs_u32m1_u32m1(vd, vs2, vl);
+}
+
+vuint32m2_t test_vaesdm_vs_u32m1_u32m2(vuint32m2_t vd, vuint32m1_t vs2, size_t vl) {
+  return __riscv_vaesdm_vs_u32m1_u32m2(vd, vs2, vl);
+}
+
+vuint32m4_t test_vaesdm_vs_u32m1_u32m4(vuint32m4_t vd, vuint32m1_t vs2, size_t vl) {
+  return __riscv_vaesdm_vs_u32m1_u32m4(vd, vs2, vl);
+}
+
+vuint32m8_t test_vaesdm_vs_u32m1_u32m8(vuint32m8_t vd, vuint32m1_t vs2, size_t vl) {
+  return __riscv_vaesdm_vs_u32m1_u32m8(vd, vs2, vl);
+}
+
+vuint32m2_t test_vaesdm_vv_u32m2(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) {
+  return __riscv_vaesdm_vv_u32m2(vd, vs2, vl);
+}
+
+vuint32m2_t test_vaesdm_vs_u32m2_u32m2(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) {
+  return __riscv_vaesdm_vs_u32m2_u32m2(vd, vs2, vl);
+}
+
+vuint32m4_t test_vaesdm_vs_u32m2_u32m4(vuint32m4_t vd, vuint32m2_t vs2, size_t vl) {
+  return __riscv_vaesdm_vs_u32m2_u32m4(vd, vs2, vl);
+}
+
+vuint32m8_t test_vaesdm_vs_u32m2_u32m8(vuint32m8_t vd, vuint32m2_t vs2, size_t vl) {
+  return __riscv_vaesdm_vs_u32m2_u32m8(vd, vs2, vl);
+}
+
+vuint32m4_t test_vaesdm_vv_u32m4(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) {
+  return __riscv_vaesdm_vv_u32m4(vd, vs2, vl);
+}
+
+vuint32m4_t test_vaesdm_vs_u32m4_u32m4(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) {
+  return __riscv_vaesdm_vs_u32m4_u32m4(vd, vs2, vl);
+}
+
+vuint32m8_t test_vaesdm_vs_u32m4_u32m8(vuint32m8_t vd, vuint32m4_t vs2, size_t vl) {
+  return __riscv_vaesdm_vs_u32m4_u32m8(vd, vs2, vl);
+}
+
+vuint32m8_t test_vaesdm_vv_u32m8(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) {
+  return __riscv_vaesdm_vv_u32m8(vd, vs2, vl);
+}
diff --git a/auto-generated/vector-crypto/api-testing/vaesef.c b/auto-generated/vector-crypto/api-testing/vaesef.c
new file mode 100644
index 000000000..375059d4d
--- /dev/null
+++ b/auto-generated/vector-crypto/api-testing/vaesef.c
@@ -0,0 +1,78 @@
+#include <riscv_vector.h>
+#include <stdint.h>
+
+vuint32mf2_t test_vaesef_vv_u32mf2(vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) {
+  return __riscv_vaesef_vv_u32mf2(vd, vs2, vl);
+}
+
+vuint32mf2_t test_vaesef_vs_u32mf2_u32mf2(vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) {
+  return __riscv_vaesef_vs_u32mf2_u32mf2(vd, vs2, vl);
+}
+
+vuint32m1_t test_vaesef_vs_u32mf2_u32m1(vuint32m1_t vd, vuint32mf2_t vs2, size_t vl) {
+  return __riscv_vaesef_vs_u32mf2_u32m1(vd, vs2, vl);
+}
+
+vuint32m2_t test_vaesef_vs_u32mf2_u32m2(vuint32m2_t vd, vuint32mf2_t vs2, size_t vl) {
+  return __riscv_vaesef_vs_u32mf2_u32m2(vd, vs2, vl);
+}
+
+vuint32m4_t test_vaesef_vs_u32mf2_u32m4(vuint32m4_t vd, vuint32mf2_t vs2, size_t vl) {
+  return __riscv_vaesef_vs_u32mf2_u32m4(vd, vs2, vl);
+}
+
+vuint32m8_t test_vaesef_vs_u32mf2_u32m8(vuint32m8_t vd, vuint32mf2_t vs2, size_t vl) {
+  return __riscv_vaesef_vs_u32mf2_u32m8(vd, vs2, vl);
+}
+
+vuint32m1_t test_vaesef_vv_u32m1(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vaesef_vv_u32m1(vd, vs2, vl); +} + +vuint32m1_t test_vaesef_vs_u32m1_u32m1(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vaesef_vs_u32m1_u32m1(vd, vs2, vl); +} + +vuint32m2_t test_vaesef_vs_u32m1_u32m2(vuint32m2_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vaesef_vs_u32m1_u32m2(vd, vs2, vl); +} + +vuint32m4_t test_vaesef_vs_u32m1_u32m4(vuint32m4_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vaesef_vs_u32m1_u32m4(vd, vs2, vl); +} + +vuint32m8_t test_vaesef_vs_u32m1_u32m8(vuint32m8_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vaesef_vs_u32m1_u32m8(vd, vs2, vl); +} + +vuint32m2_t test_vaesef_vv_u32m2(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vaesef_vv_u32m2(vd, vs2, vl); +} + +vuint32m2_t test_vaesef_vs_u32m2_u32m2(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vaesef_vs_u32m2_u32m2(vd, vs2, vl); +} + +vuint32m4_t test_vaesef_vs_u32m2_u32m4(vuint32m4_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vaesef_vs_u32m2_u32m4(vd, vs2, vl); +} + +vuint32m8_t test_vaesef_vs_u32m2_u32m8(vuint32m8_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vaesef_vs_u32m2_u32m8(vd, vs2, vl); +} + +vuint32m4_t test_vaesef_vv_u32m4(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vaesef_vv_u32m4(vd, vs2, vl); +} + +vuint32m4_t test_vaesef_vs_u32m4_u32m4(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vaesef_vs_u32m4_u32m4(vd, vs2, vl); +} + +vuint32m8_t test_vaesef_vs_u32m4_u32m8(vuint32m8_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vaesef_vs_u32m4_u32m8(vd, vs2, vl); +} + +vuint32m8_t test_vaesef_vv_u32m8(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { + return __riscv_vaesef_vv_u32m8(vd, vs2, vl); +} diff --git a/auto-generated/vector-crypto/api-testing/vaesem.c b/auto-generated/vector-crypto/api-testing/vaesem.c new file mode 100644 index 000000000..76aa9d61b --- /dev/null +++ b/auto-generated/vector-crypto/api-testing/vaesem.c @@ -0,0 +1,78 @@ +#include +#include + +vuint32mf2_t test_vaesem_vv_u32mf2(vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) { + return __riscv_vaesem_vv_u32mf2(vd, vs2, vl); +} + +vuint32mf2_t test_vaesem_vs_u32mf2_u32mf2(vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) { + return __riscv_vaesem_vs_u32mf2_u32mf2(vd, vs2, vl); +} + +vuint32m1_t test_vaesem_vs_u32mf2_u32m1(vuint32m1_t vd, vuint32mf2_t vs2, size_t vl) { + return __riscv_vaesem_vs_u32mf2_u32m1(vd, vs2, vl); +} + +vuint32m2_t test_vaesem_vs_u32mf2_u32m2(vuint32m2_t vd, vuint32mf2_t vs2, size_t vl) { + return __riscv_vaesem_vs_u32mf2_u32m2(vd, vs2, vl); +} + +vuint32m4_t test_vaesem_vs_u32mf2_u32m4(vuint32m4_t vd, vuint32mf2_t vs2, size_t vl) { + return __riscv_vaesem_vs_u32mf2_u32m4(vd, vs2, vl); +} + +vuint32m8_t test_vaesem_vs_u32mf2_u32m8(vuint32m8_t vd, vuint32mf2_t vs2, size_t vl) { + return __riscv_vaesem_vs_u32mf2_u32m8(vd, vs2, vl); +} + +vuint32m1_t test_vaesem_vv_u32m1(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vaesem_vv_u32m1(vd, vs2, vl); +} + +vuint32m1_t test_vaesem_vs_u32m1_u32m1(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vaesem_vs_u32m1_u32m1(vd, vs2, vl); +} + +vuint32m2_t test_vaesem_vs_u32m1_u32m2(vuint32m2_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vaesem_vs_u32m1_u32m2(vd, vs2, vl); +} + +vuint32m4_t test_vaesem_vs_u32m1_u32m4(vuint32m4_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vaesem_vs_u32m1_u32m4(vd, vs2, vl); +} + +vuint32m8_t 
test_vaesem_vs_u32m1_u32m8(vuint32m8_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vaesem_vs_u32m1_u32m8(vd, vs2, vl); +} + +vuint32m2_t test_vaesem_vv_u32m2(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vaesem_vv_u32m2(vd, vs2, vl); +} + +vuint32m2_t test_vaesem_vs_u32m2_u32m2(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vaesem_vs_u32m2_u32m2(vd, vs2, vl); +} + +vuint32m4_t test_vaesem_vs_u32m2_u32m4(vuint32m4_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vaesem_vs_u32m2_u32m4(vd, vs2, vl); +} + +vuint32m8_t test_vaesem_vs_u32m2_u32m8(vuint32m8_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vaesem_vs_u32m2_u32m8(vd, vs2, vl); +} + +vuint32m4_t test_vaesem_vv_u32m4(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vaesem_vv_u32m4(vd, vs2, vl); +} + +vuint32m4_t test_vaesem_vs_u32m4_u32m4(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vaesem_vs_u32m4_u32m4(vd, vs2, vl); +} + +vuint32m8_t test_vaesem_vs_u32m4_u32m8(vuint32m8_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vaesem_vs_u32m4_u32m8(vd, vs2, vl); +} + +vuint32m8_t test_vaesem_vv_u32m8(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { + return __riscv_vaesem_vv_u32m8(vd, vs2, vl); +} diff --git a/auto-generated/vector-crypto/api-testing/vaeskf1.c b/auto-generated/vector-crypto/api-testing/vaeskf1.c new file mode 100644 index 000000000..a6f2fbd00 --- /dev/null +++ b/auto-generated/vector-crypto/api-testing/vaeskf1.c @@ -0,0 +1,22 @@ +#include +#include + +vuint32mf2_t test_vaeskf1_vi_u32mf2(vuint32mf2_t vs2, size_t vl) { + return __riscv_vaeskf1_vi_u32mf2(vs2, 0, vl); +} + +vuint32m1_t test_vaeskf1_vi_u32m1(vuint32m1_t vs2, size_t vl) { + return __riscv_vaeskf1_vi_u32m1(vs2, 0, vl); +} + +vuint32m2_t test_vaeskf1_vi_u32m2(vuint32m2_t vs2, size_t vl) { + return __riscv_vaeskf1_vi_u32m2(vs2, 0, vl); +} + +vuint32m4_t test_vaeskf1_vi_u32m4(vuint32m4_t vs2, size_t vl) { + return __riscv_vaeskf1_vi_u32m4(vs2, 0, vl); +} + +vuint32m8_t test_vaeskf1_vi_u32m8(vuint32m8_t vs2, size_t vl) { + return __riscv_vaeskf1_vi_u32m8(vs2, 0, vl); +} diff --git a/auto-generated/vector-crypto/api-testing/vaeskf2.c b/auto-generated/vector-crypto/api-testing/vaeskf2.c new file mode 100644 index 000000000..060b9874f --- /dev/null +++ b/auto-generated/vector-crypto/api-testing/vaeskf2.c @@ -0,0 +1,22 @@ +#include +#include + +vuint32mf2_t test_vaeskf2_vi_u32mf2(vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) { + return __riscv_vaeskf2_vi_u32mf2(vd, vs2, 0, vl); +} + +vuint32m1_t test_vaeskf2_vi_u32m1(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vaeskf2_vi_u32m1(vd, vs2, 0, vl); +} + +vuint32m2_t test_vaeskf2_vi_u32m2(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vaeskf2_vi_u32m2(vd, vs2, 0, vl); +} + +vuint32m4_t test_vaeskf2_vi_u32m4(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vaeskf2_vi_u32m4(vd, vs2, 0, vl); +} + +vuint32m8_t test_vaeskf2_vi_u32m8(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { + return __riscv_vaeskf2_vi_u32m8(vd, vs2, 0, vl); +} diff --git a/auto-generated/vector-crypto/api-testing/vaesz.c b/auto-generated/vector-crypto/api-testing/vaesz.c new file mode 100644 index 000000000..f3c6760ce --- /dev/null +++ b/auto-generated/vector-crypto/api-testing/vaesz.c @@ -0,0 +1,58 @@ +#include +#include + +vuint32mf2_t test_vaesz_vs_u32mf2_u32mf2(vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) { + return __riscv_vaesz_vs_u32mf2_u32mf2(vd, vs2, vl); +} + +vuint32m1_t test_vaesz_vs_u32mf2_u32m1(vuint32m1_t vd, 
vuint32mf2_t vs2, size_t vl) { + return __riscv_vaesz_vs_u32mf2_u32m1(vd, vs2, vl); +} + +vuint32m2_t test_vaesz_vs_u32mf2_u32m2(vuint32m2_t vd, vuint32mf2_t vs2, size_t vl) { + return __riscv_vaesz_vs_u32mf2_u32m2(vd, vs2, vl); +} + +vuint32m4_t test_vaesz_vs_u32mf2_u32m4(vuint32m4_t vd, vuint32mf2_t vs2, size_t vl) { + return __riscv_vaesz_vs_u32mf2_u32m4(vd, vs2, vl); +} + +vuint32m8_t test_vaesz_vs_u32mf2_u32m8(vuint32m8_t vd, vuint32mf2_t vs2, size_t vl) { + return __riscv_vaesz_vs_u32mf2_u32m8(vd, vs2, vl); +} + +vuint32m1_t test_vaesz_vs_u32m1_u32m1(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vaesz_vs_u32m1_u32m1(vd, vs2, vl); +} + +vuint32m2_t test_vaesz_vs_u32m1_u32m2(vuint32m2_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vaesz_vs_u32m1_u32m2(vd, vs2, vl); +} + +vuint32m4_t test_vaesz_vs_u32m1_u32m4(vuint32m4_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vaesz_vs_u32m1_u32m4(vd, vs2, vl); +} + +vuint32m8_t test_vaesz_vs_u32m1_u32m8(vuint32m8_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vaesz_vs_u32m1_u32m8(vd, vs2, vl); +} + +vuint32m2_t test_vaesz_vs_u32m2_u32m2(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vaesz_vs_u32m2_u32m2(vd, vs2, vl); +} + +vuint32m4_t test_vaesz_vs_u32m2_u32m4(vuint32m4_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vaesz_vs_u32m2_u32m4(vd, vs2, vl); +} + +vuint32m8_t test_vaesz_vs_u32m2_u32m8(vuint32m8_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vaesz_vs_u32m2_u32m8(vd, vs2, vl); +} + +vuint32m4_t test_vaesz_vs_u32m4_u32m4(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vaesz_vs_u32m4_u32m4(vd, vs2, vl); +} + +vuint32m8_t test_vaesz_vs_u32m4_u32m8(vuint32m8_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vaesz_vs_u32m4_u32m8(vd, vs2, vl); +} diff --git a/auto-generated/vector-crypto/api-testing/vandn.c b/auto-generated/vector-crypto/api-testing/vandn.c new file mode 100644 index 000000000..7400c8a58 --- /dev/null +++ b/auto-generated/vector-crypto/api-testing/vandn.c @@ -0,0 +1,354 @@ +#include +#include + +vuint8mf8_t test_vandn_vv_u8mf8(vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { + return __riscv_vandn_vv_u8mf8(vs2, vs1, vl); +} + +vuint8mf8_t test_vandn_vx_u8mf8(vuint8mf8_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vandn_vx_u8mf8(vs2, rs1, vl); +} + +vuint8mf4_t test_vandn_vv_u8mf4(vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { + return __riscv_vandn_vv_u8mf4(vs2, vs1, vl); +} + +vuint8mf4_t test_vandn_vx_u8mf4(vuint8mf4_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vandn_vx_u8mf4(vs2, rs1, vl); +} + +vuint8mf2_t test_vandn_vv_u8mf2(vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { + return __riscv_vandn_vv_u8mf2(vs2, vs1, vl); +} + +vuint8mf2_t test_vandn_vx_u8mf2(vuint8mf2_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vandn_vx_u8mf2(vs2, rs1, vl); +} + +vuint8m1_t test_vandn_vv_u8m1(vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vandn_vv_u8m1(vs2, vs1, vl); +} + +vuint8m1_t test_vandn_vx_u8m1(vuint8m1_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vandn_vx_u8m1(vs2, rs1, vl); +} + +vuint8m2_t test_vandn_vv_u8m2(vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) { + return __riscv_vandn_vv_u8m2(vs2, vs1, vl); +} + +vuint8m2_t test_vandn_vx_u8m2(vuint8m2_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vandn_vx_u8m2(vs2, rs1, vl); +} + +vuint8m4_t test_vandn_vv_u8m4(vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) { + return __riscv_vandn_vv_u8m4(vs2, vs1, vl); +} + +vuint8m4_t test_vandn_vx_u8m4(vuint8m4_t vs2, uint8_t rs1, size_t vl) { + 
return __riscv_vandn_vx_u8m4(vs2, rs1, vl); +} + +vuint8m8_t test_vandn_vv_u8m8(vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) { + return __riscv_vandn_vv_u8m8(vs2, vs1, vl); +} + +vuint8m8_t test_vandn_vx_u8m8(vuint8m8_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vandn_vx_u8m8(vs2, rs1, vl); +} + +vuint16mf4_t test_vandn_vv_u16mf4(vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { + return __riscv_vandn_vv_u16mf4(vs2, vs1, vl); +} + +vuint16mf4_t test_vandn_vx_u16mf4(vuint16mf4_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vandn_vx_u16mf4(vs2, rs1, vl); +} + +vuint16mf2_t test_vandn_vv_u16mf2(vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { + return __riscv_vandn_vv_u16mf2(vs2, vs1, vl); +} + +vuint16mf2_t test_vandn_vx_u16mf2(vuint16mf2_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vandn_vx_u16mf2(vs2, rs1, vl); +} + +vuint16m1_t test_vandn_vv_u16m1(vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vandn_vv_u16m1(vs2, vs1, vl); +} + +vuint16m1_t test_vandn_vx_u16m1(vuint16m1_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vandn_vx_u16m1(vs2, rs1, vl); +} + +vuint16m2_t test_vandn_vv_u16m2(vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { + return __riscv_vandn_vv_u16m2(vs2, vs1, vl); +} + +vuint16m2_t test_vandn_vx_u16m2(vuint16m2_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vandn_vx_u16m2(vs2, rs1, vl); +} + +vuint16m4_t test_vandn_vv_u16m4(vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { + return __riscv_vandn_vv_u16m4(vs2, vs1, vl); +} + +vuint16m4_t test_vandn_vx_u16m4(vuint16m4_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vandn_vx_u16m4(vs2, rs1, vl); +} + +vuint16m8_t test_vandn_vv_u16m8(vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) { + return __riscv_vandn_vv_u16m8(vs2, vs1, vl); +} + +vuint16m8_t test_vandn_vx_u16m8(vuint16m8_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vandn_vx_u16m8(vs2, rs1, vl); +} + +vuint32mf2_t test_vandn_vv_u32mf2(vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { + return __riscv_vandn_vv_u32mf2(vs2, vs1, vl); +} + +vuint32mf2_t test_vandn_vx_u32mf2(vuint32mf2_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vandn_vx_u32mf2(vs2, rs1, vl); +} + +vuint32m1_t test_vandn_vv_u32m1(vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vandn_vv_u32m1(vs2, vs1, vl); +} + +vuint32m1_t test_vandn_vx_u32m1(vuint32m1_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vandn_vx_u32m1(vs2, rs1, vl); +} + +vuint32m2_t test_vandn_vv_u32m2(vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { + return __riscv_vandn_vv_u32m2(vs2, vs1, vl); +} + +vuint32m2_t test_vandn_vx_u32m2(vuint32m2_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vandn_vx_u32m2(vs2, rs1, vl); +} + +vuint32m4_t test_vandn_vv_u32m4(vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { + return __riscv_vandn_vv_u32m4(vs2, vs1, vl); +} + +vuint32m4_t test_vandn_vx_u32m4(vuint32m4_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vandn_vx_u32m4(vs2, rs1, vl); +} + +vuint32m8_t test_vandn_vv_u32m8(vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) { + return __riscv_vandn_vv_u32m8(vs2, vs1, vl); +} + +vuint32m8_t test_vandn_vx_u32m8(vuint32m8_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vandn_vx_u32m8(vs2, rs1, vl); +} + +vuint64m1_t test_vandn_vv_u64m1(vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vandn_vv_u64m1(vs2, vs1, vl); +} + +vuint64m1_t test_vandn_vx_u64m1(vuint64m1_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vandn_vx_u64m1(vs2, rs1, vl); +} + +vuint64m2_t test_vandn_vv_u64m2(vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { + return 
__riscv_vandn_vv_u64m2(vs2, vs1, vl); +} + +vuint64m2_t test_vandn_vx_u64m2(vuint64m2_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vandn_vx_u64m2(vs2, rs1, vl); +} + +vuint64m4_t test_vandn_vv_u64m4(vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { + return __riscv_vandn_vv_u64m4(vs2, vs1, vl); +} + +vuint64m4_t test_vandn_vx_u64m4(vuint64m4_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vandn_vx_u64m4(vs2, rs1, vl); +} + +vuint64m8_t test_vandn_vv_u64m8(vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { + return __riscv_vandn_vv_u64m8(vs2, vs1, vl); +} + +vuint64m8_t test_vandn_vx_u64m8(vuint64m8_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vandn_vx_u64m8(vs2, rs1, vl); +} + +vuint8mf8_t test_vandn_vv_u8mf8_m(vbool64_t vm, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { + return __riscv_vandn_vv_u8mf8_m(vm, vs2, vs1, vl); +} + +vuint8mf8_t test_vandn_vx_u8mf8_m(vbool64_t vm, vuint8mf8_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vandn_vx_u8mf8_m(vm, vs2, rs1, vl); +} + +vuint8mf4_t test_vandn_vv_u8mf4_m(vbool32_t vm, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { + return __riscv_vandn_vv_u8mf4_m(vm, vs2, vs1, vl); +} + +vuint8mf4_t test_vandn_vx_u8mf4_m(vbool32_t vm, vuint8mf4_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vandn_vx_u8mf4_m(vm, vs2, rs1, vl); +} + +vuint8mf2_t test_vandn_vv_u8mf2_m(vbool16_t vm, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { + return __riscv_vandn_vv_u8mf2_m(vm, vs2, vs1, vl); +} + +vuint8mf2_t test_vandn_vx_u8mf2_m(vbool16_t vm, vuint8mf2_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vandn_vx_u8mf2_m(vm, vs2, rs1, vl); +} + +vuint8m1_t test_vandn_vv_u8m1_m(vbool8_t vm, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vandn_vv_u8m1_m(vm, vs2, vs1, vl); +} + +vuint8m1_t test_vandn_vx_u8m1_m(vbool8_t vm, vuint8m1_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vandn_vx_u8m1_m(vm, vs2, rs1, vl); +} + +vuint8m2_t test_vandn_vv_u8m2_m(vbool4_t vm, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) { + return __riscv_vandn_vv_u8m2_m(vm, vs2, vs1, vl); +} + +vuint8m2_t test_vandn_vx_u8m2_m(vbool4_t vm, vuint8m2_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vandn_vx_u8m2_m(vm, vs2, rs1, vl); +} + +vuint8m4_t test_vandn_vv_u8m4_m(vbool2_t vm, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) { + return __riscv_vandn_vv_u8m4_m(vm, vs2, vs1, vl); +} + +vuint8m4_t test_vandn_vx_u8m4_m(vbool2_t vm, vuint8m4_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vandn_vx_u8m4_m(vm, vs2, rs1, vl); +} + +vuint8m8_t test_vandn_vv_u8m8_m(vbool1_t vm, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) { + return __riscv_vandn_vv_u8m8_m(vm, vs2, vs1, vl); +} + +vuint8m8_t test_vandn_vx_u8m8_m(vbool1_t vm, vuint8m8_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vandn_vx_u8m8_m(vm, vs2, rs1, vl); +} + +vuint16mf4_t test_vandn_vv_u16mf4_m(vbool64_t vm, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { + return __riscv_vandn_vv_u16mf4_m(vm, vs2, vs1, vl); +} + +vuint16mf4_t test_vandn_vx_u16mf4_m(vbool64_t vm, vuint16mf4_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vandn_vx_u16mf4_m(vm, vs2, rs1, vl); +} + +vuint16mf2_t test_vandn_vv_u16mf2_m(vbool32_t vm, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { + return __riscv_vandn_vv_u16mf2_m(vm, vs2, vs1, vl); +} + +vuint16mf2_t test_vandn_vx_u16mf2_m(vbool32_t vm, vuint16mf2_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vandn_vx_u16mf2_m(vm, vs2, rs1, vl); +} + +vuint16m1_t test_vandn_vv_u16m1_m(vbool16_t vm, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vandn_vv_u16m1_m(vm, vs2, vs1, vl); 
+} + +vuint16m1_t test_vandn_vx_u16m1_m(vbool16_t vm, vuint16m1_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vandn_vx_u16m1_m(vm, vs2, rs1, vl); +} + +vuint16m2_t test_vandn_vv_u16m2_m(vbool8_t vm, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { + return __riscv_vandn_vv_u16m2_m(vm, vs2, vs1, vl); +} + +vuint16m2_t test_vandn_vx_u16m2_m(vbool8_t vm, vuint16m2_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vandn_vx_u16m2_m(vm, vs2, rs1, vl); +} + +vuint16m4_t test_vandn_vv_u16m4_m(vbool4_t vm, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { + return __riscv_vandn_vv_u16m4_m(vm, vs2, vs1, vl); +} + +vuint16m4_t test_vandn_vx_u16m4_m(vbool4_t vm, vuint16m4_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vandn_vx_u16m4_m(vm, vs2, rs1, vl); +} + +vuint16m8_t test_vandn_vv_u16m8_m(vbool2_t vm, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) { + return __riscv_vandn_vv_u16m8_m(vm, vs2, vs1, vl); +} + +vuint16m8_t test_vandn_vx_u16m8_m(vbool2_t vm, vuint16m8_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vandn_vx_u16m8_m(vm, vs2, rs1, vl); +} + +vuint32mf2_t test_vandn_vv_u32mf2_m(vbool64_t vm, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { + return __riscv_vandn_vv_u32mf2_m(vm, vs2, vs1, vl); +} + +vuint32mf2_t test_vandn_vx_u32mf2_m(vbool64_t vm, vuint32mf2_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vandn_vx_u32mf2_m(vm, vs2, rs1, vl); +} + +vuint32m1_t test_vandn_vv_u32m1_m(vbool32_t vm, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vandn_vv_u32m1_m(vm, vs2, vs1, vl); +} + +vuint32m1_t test_vandn_vx_u32m1_m(vbool32_t vm, vuint32m1_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vandn_vx_u32m1_m(vm, vs2, rs1, vl); +} + +vuint32m2_t test_vandn_vv_u32m2_m(vbool16_t vm, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { + return __riscv_vandn_vv_u32m2_m(vm, vs2, vs1, vl); +} + +vuint32m2_t test_vandn_vx_u32m2_m(vbool16_t vm, vuint32m2_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vandn_vx_u32m2_m(vm, vs2, rs1, vl); +} + +vuint32m4_t test_vandn_vv_u32m4_m(vbool8_t vm, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { + return __riscv_vandn_vv_u32m4_m(vm, vs2, vs1, vl); +} + +vuint32m4_t test_vandn_vx_u32m4_m(vbool8_t vm, vuint32m4_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vandn_vx_u32m4_m(vm, vs2, rs1, vl); +} + +vuint32m8_t test_vandn_vv_u32m8_m(vbool4_t vm, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) { + return __riscv_vandn_vv_u32m8_m(vm, vs2, vs1, vl); +} + +vuint32m8_t test_vandn_vx_u32m8_m(vbool4_t vm, vuint32m8_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vandn_vx_u32m8_m(vm, vs2, rs1, vl); +} + +vuint64m1_t test_vandn_vv_u64m1_m(vbool64_t vm, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vandn_vv_u64m1_m(vm, vs2, vs1, vl); +} + +vuint64m1_t test_vandn_vx_u64m1_m(vbool64_t vm, vuint64m1_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vandn_vx_u64m1_m(vm, vs2, rs1, vl); +} + +vuint64m2_t test_vandn_vv_u64m2_m(vbool32_t vm, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { + return __riscv_vandn_vv_u64m2_m(vm, vs2, vs1, vl); +} + +vuint64m2_t test_vandn_vx_u64m2_m(vbool32_t vm, vuint64m2_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vandn_vx_u64m2_m(vm, vs2, rs1, vl); +} + +vuint64m4_t test_vandn_vv_u64m4_m(vbool16_t vm, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { + return __riscv_vandn_vv_u64m4_m(vm, vs2, vs1, vl); +} + +vuint64m4_t test_vandn_vx_u64m4_m(vbool16_t vm, vuint64m4_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vandn_vx_u64m4_m(vm, vs2, rs1, vl); +} + +vuint64m8_t test_vandn_vv_u64m8_m(vbool8_t 
vm, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { + return __riscv_vandn_vv_u64m8_m(vm, vs2, vs1, vl); +} + +vuint64m8_t test_vandn_vx_u64m8_m(vbool8_t vm, vuint64m8_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vandn_vx_u64m8_m(vm, vs2, rs1, vl); +} diff --git a/auto-generated/vector-crypto/api-testing/vbrev.c b/auto-generated/vector-crypto/api-testing/vbrev.c new file mode 100644 index 000000000..fd22f6114 --- /dev/null +++ b/auto-generated/vector-crypto/api-testing/vbrev.c @@ -0,0 +1,178 @@ +#include +#include + +vuint8mf8_t test_vbrev_v_u8mf8(vuint8mf8_t vs2, size_t vl) { + return __riscv_vbrev_v_u8mf8(vs2, vl); +} + +vuint8mf4_t test_vbrev_v_u8mf4(vuint8mf4_t vs2, size_t vl) { + return __riscv_vbrev_v_u8mf4(vs2, vl); +} + +vuint8mf2_t test_vbrev_v_u8mf2(vuint8mf2_t vs2, size_t vl) { + return __riscv_vbrev_v_u8mf2(vs2, vl); +} + +vuint8m1_t test_vbrev_v_u8m1(vuint8m1_t vs2, size_t vl) { + return __riscv_vbrev_v_u8m1(vs2, vl); +} + +vuint8m2_t test_vbrev_v_u8m2(vuint8m2_t vs2, size_t vl) { + return __riscv_vbrev_v_u8m2(vs2, vl); +} + +vuint8m4_t test_vbrev_v_u8m4(vuint8m4_t vs2, size_t vl) { + return __riscv_vbrev_v_u8m4(vs2, vl); +} + +vuint8m8_t test_vbrev_v_u8m8(vuint8m8_t vs2, size_t vl) { + return __riscv_vbrev_v_u8m8(vs2, vl); +} + +vuint16mf4_t test_vbrev_v_u16mf4(vuint16mf4_t vs2, size_t vl) { + return __riscv_vbrev_v_u16mf4(vs2, vl); +} + +vuint16mf2_t test_vbrev_v_u16mf2(vuint16mf2_t vs2, size_t vl) { + return __riscv_vbrev_v_u16mf2(vs2, vl); +} + +vuint16m1_t test_vbrev_v_u16m1(vuint16m1_t vs2, size_t vl) { + return __riscv_vbrev_v_u16m1(vs2, vl); +} + +vuint16m2_t test_vbrev_v_u16m2(vuint16m2_t vs2, size_t vl) { + return __riscv_vbrev_v_u16m2(vs2, vl); +} + +vuint16m4_t test_vbrev_v_u16m4(vuint16m4_t vs2, size_t vl) { + return __riscv_vbrev_v_u16m4(vs2, vl); +} + +vuint16m8_t test_vbrev_v_u16m8(vuint16m8_t vs2, size_t vl) { + return __riscv_vbrev_v_u16m8(vs2, vl); +} + +vuint32mf2_t test_vbrev_v_u32mf2(vuint32mf2_t vs2, size_t vl) { + return __riscv_vbrev_v_u32mf2(vs2, vl); +} + +vuint32m1_t test_vbrev_v_u32m1(vuint32m1_t vs2, size_t vl) { + return __riscv_vbrev_v_u32m1(vs2, vl); +} + +vuint32m2_t test_vbrev_v_u32m2(vuint32m2_t vs2, size_t vl) { + return __riscv_vbrev_v_u32m2(vs2, vl); +} + +vuint32m4_t test_vbrev_v_u32m4(vuint32m4_t vs2, size_t vl) { + return __riscv_vbrev_v_u32m4(vs2, vl); +} + +vuint32m8_t test_vbrev_v_u32m8(vuint32m8_t vs2, size_t vl) { + return __riscv_vbrev_v_u32m8(vs2, vl); +} + +vuint64m1_t test_vbrev_v_u64m1(vuint64m1_t vs2, size_t vl) { + return __riscv_vbrev_v_u64m1(vs2, vl); +} + +vuint64m2_t test_vbrev_v_u64m2(vuint64m2_t vs2, size_t vl) { + return __riscv_vbrev_v_u64m2(vs2, vl); +} + +vuint64m4_t test_vbrev_v_u64m4(vuint64m4_t vs2, size_t vl) { + return __riscv_vbrev_v_u64m4(vs2, vl); +} + +vuint64m8_t test_vbrev_v_u64m8(vuint64m8_t vs2, size_t vl) { + return __riscv_vbrev_v_u64m8(vs2, vl); +} + +vuint8mf8_t test_vbrev_v_u8mf8_m(vbool64_t vm, vuint8mf8_t vs2, size_t vl) { + return __riscv_vbrev_v_u8mf8_m(vm, vs2, vl); +} + +vuint8mf4_t test_vbrev_v_u8mf4_m(vbool32_t vm, vuint8mf4_t vs2, size_t vl) { + return __riscv_vbrev_v_u8mf4_m(vm, vs2, vl); +} + +vuint8mf2_t test_vbrev_v_u8mf2_m(vbool16_t vm, vuint8mf2_t vs2, size_t vl) { + return __riscv_vbrev_v_u8mf2_m(vm, vs2, vl); +} + +vuint8m1_t test_vbrev_v_u8m1_m(vbool8_t vm, vuint8m1_t vs2, size_t vl) { + return __riscv_vbrev_v_u8m1_m(vm, vs2, vl); +} + +vuint8m2_t test_vbrev_v_u8m2_m(vbool4_t vm, vuint8m2_t vs2, size_t vl) { + return __riscv_vbrev_v_u8m2_m(vm, vs2, vl); +} + +vuint8m4_t 
test_vbrev_v_u8m4_m(vbool2_t vm, vuint8m4_t vs2, size_t vl) { + return __riscv_vbrev_v_u8m4_m(vm, vs2, vl); +} + +vuint8m8_t test_vbrev_v_u8m8_m(vbool1_t vm, vuint8m8_t vs2, size_t vl) { + return __riscv_vbrev_v_u8m8_m(vm, vs2, vl); +} + +vuint16mf4_t test_vbrev_v_u16mf4_m(vbool64_t vm, vuint16mf4_t vs2, size_t vl) { + return __riscv_vbrev_v_u16mf4_m(vm, vs2, vl); +} + +vuint16mf2_t test_vbrev_v_u16mf2_m(vbool32_t vm, vuint16mf2_t vs2, size_t vl) { + return __riscv_vbrev_v_u16mf2_m(vm, vs2, vl); +} + +vuint16m1_t test_vbrev_v_u16m1_m(vbool16_t vm, vuint16m1_t vs2, size_t vl) { + return __riscv_vbrev_v_u16m1_m(vm, vs2, vl); +} + +vuint16m2_t test_vbrev_v_u16m2_m(vbool8_t vm, vuint16m2_t vs2, size_t vl) { + return __riscv_vbrev_v_u16m2_m(vm, vs2, vl); +} + +vuint16m4_t test_vbrev_v_u16m4_m(vbool4_t vm, vuint16m4_t vs2, size_t vl) { + return __riscv_vbrev_v_u16m4_m(vm, vs2, vl); +} + +vuint16m8_t test_vbrev_v_u16m8_m(vbool2_t vm, vuint16m8_t vs2, size_t vl) { + return __riscv_vbrev_v_u16m8_m(vm, vs2, vl); +} + +vuint32mf2_t test_vbrev_v_u32mf2_m(vbool64_t vm, vuint32mf2_t vs2, size_t vl) { + return __riscv_vbrev_v_u32mf2_m(vm, vs2, vl); +} + +vuint32m1_t test_vbrev_v_u32m1_m(vbool32_t vm, vuint32m1_t vs2, size_t vl) { + return __riscv_vbrev_v_u32m1_m(vm, vs2, vl); +} + +vuint32m2_t test_vbrev_v_u32m2_m(vbool16_t vm, vuint32m2_t vs2, size_t vl) { + return __riscv_vbrev_v_u32m2_m(vm, vs2, vl); +} + +vuint32m4_t test_vbrev_v_u32m4_m(vbool8_t vm, vuint32m4_t vs2, size_t vl) { + return __riscv_vbrev_v_u32m4_m(vm, vs2, vl); +} + +vuint32m8_t test_vbrev_v_u32m8_m(vbool4_t vm, vuint32m8_t vs2, size_t vl) { + return __riscv_vbrev_v_u32m8_m(vm, vs2, vl); +} + +vuint64m1_t test_vbrev_v_u64m1_m(vbool64_t vm, vuint64m1_t vs2, size_t vl) { + return __riscv_vbrev_v_u64m1_m(vm, vs2, vl); +} + +vuint64m2_t test_vbrev_v_u64m2_m(vbool32_t vm, vuint64m2_t vs2, size_t vl) { + return __riscv_vbrev_v_u64m2_m(vm, vs2, vl); +} + +vuint64m4_t test_vbrev_v_u64m4_m(vbool16_t vm, vuint64m4_t vs2, size_t vl) { + return __riscv_vbrev_v_u64m4_m(vm, vs2, vl); +} + +vuint64m8_t test_vbrev_v_u64m8_m(vbool8_t vm, vuint64m8_t vs2, size_t vl) { + return __riscv_vbrev_v_u64m8_m(vm, vs2, vl); +} diff --git a/auto-generated/vector-crypto/api-testing/vbrev8.c b/auto-generated/vector-crypto/api-testing/vbrev8.c new file mode 100644 index 000000000..6d29c2665 --- /dev/null +++ b/auto-generated/vector-crypto/api-testing/vbrev8.c @@ -0,0 +1,178 @@ +#include +#include + +vuint8mf8_t test_vbrev8_v_u8mf8(vuint8mf8_t vs2, size_t vl) { + return __riscv_vbrev8_v_u8mf8(vs2, vl); +} + +vuint8mf4_t test_vbrev8_v_u8mf4(vuint8mf4_t vs2, size_t vl) { + return __riscv_vbrev8_v_u8mf4(vs2, vl); +} + +vuint8mf2_t test_vbrev8_v_u8mf2(vuint8mf2_t vs2, size_t vl) { + return __riscv_vbrev8_v_u8mf2(vs2, vl); +} + +vuint8m1_t test_vbrev8_v_u8m1(vuint8m1_t vs2, size_t vl) { + return __riscv_vbrev8_v_u8m1(vs2, vl); +} + +vuint8m2_t test_vbrev8_v_u8m2(vuint8m2_t vs2, size_t vl) { + return __riscv_vbrev8_v_u8m2(vs2, vl); +} + +vuint8m4_t test_vbrev8_v_u8m4(vuint8m4_t vs2, size_t vl) { + return __riscv_vbrev8_v_u8m4(vs2, vl); +} + +vuint8m8_t test_vbrev8_v_u8m8(vuint8m8_t vs2, size_t vl) { + return __riscv_vbrev8_v_u8m8(vs2, vl); +} + +vuint16mf4_t test_vbrev8_v_u16mf4(vuint16mf4_t vs2, size_t vl) { + return __riscv_vbrev8_v_u16mf4(vs2, vl); +} + +vuint16mf2_t test_vbrev8_v_u16mf2(vuint16mf2_t vs2, size_t vl) { + return __riscv_vbrev8_v_u16mf2(vs2, vl); +} + +vuint16m1_t test_vbrev8_v_u16m1(vuint16m1_t vs2, size_t vl) { + return __riscv_vbrev8_v_u16m1(vs2, vl); +} 
+ +vuint16m2_t test_vbrev8_v_u16m2(vuint16m2_t vs2, size_t vl) { + return __riscv_vbrev8_v_u16m2(vs2, vl); +} + +vuint16m4_t test_vbrev8_v_u16m4(vuint16m4_t vs2, size_t vl) { + return __riscv_vbrev8_v_u16m4(vs2, vl); +} + +vuint16m8_t test_vbrev8_v_u16m8(vuint16m8_t vs2, size_t vl) { + return __riscv_vbrev8_v_u16m8(vs2, vl); +} + +vuint32mf2_t test_vbrev8_v_u32mf2(vuint32mf2_t vs2, size_t vl) { + return __riscv_vbrev8_v_u32mf2(vs2, vl); +} + +vuint32m1_t test_vbrev8_v_u32m1(vuint32m1_t vs2, size_t vl) { + return __riscv_vbrev8_v_u32m1(vs2, vl); +} + +vuint32m2_t test_vbrev8_v_u32m2(vuint32m2_t vs2, size_t vl) { + return __riscv_vbrev8_v_u32m2(vs2, vl); +} + +vuint32m4_t test_vbrev8_v_u32m4(vuint32m4_t vs2, size_t vl) { + return __riscv_vbrev8_v_u32m4(vs2, vl); +} + +vuint32m8_t test_vbrev8_v_u32m8(vuint32m8_t vs2, size_t vl) { + return __riscv_vbrev8_v_u32m8(vs2, vl); +} + +vuint64m1_t test_vbrev8_v_u64m1(vuint64m1_t vs2, size_t vl) { + return __riscv_vbrev8_v_u64m1(vs2, vl); +} + +vuint64m2_t test_vbrev8_v_u64m2(vuint64m2_t vs2, size_t vl) { + return __riscv_vbrev8_v_u64m2(vs2, vl); +} + +vuint64m4_t test_vbrev8_v_u64m4(vuint64m4_t vs2, size_t vl) { + return __riscv_vbrev8_v_u64m4(vs2, vl); +} + +vuint64m8_t test_vbrev8_v_u64m8(vuint64m8_t vs2, size_t vl) { + return __riscv_vbrev8_v_u64m8(vs2, vl); +} + +vuint8mf8_t test_vbrev8_v_u8mf8_m(vbool64_t vm, vuint8mf8_t vs2, size_t vl) { + return __riscv_vbrev8_v_u8mf8_m(vm, vs2, vl); +} + +vuint8mf4_t test_vbrev8_v_u8mf4_m(vbool32_t vm, vuint8mf4_t vs2, size_t vl) { + return __riscv_vbrev8_v_u8mf4_m(vm, vs2, vl); +} + +vuint8mf2_t test_vbrev8_v_u8mf2_m(vbool16_t vm, vuint8mf2_t vs2, size_t vl) { + return __riscv_vbrev8_v_u8mf2_m(vm, vs2, vl); +} + +vuint8m1_t test_vbrev8_v_u8m1_m(vbool8_t vm, vuint8m1_t vs2, size_t vl) { + return __riscv_vbrev8_v_u8m1_m(vm, vs2, vl); +} + +vuint8m2_t test_vbrev8_v_u8m2_m(vbool4_t vm, vuint8m2_t vs2, size_t vl) { + return __riscv_vbrev8_v_u8m2_m(vm, vs2, vl); +} + +vuint8m4_t test_vbrev8_v_u8m4_m(vbool2_t vm, vuint8m4_t vs2, size_t vl) { + return __riscv_vbrev8_v_u8m4_m(vm, vs2, vl); +} + +vuint8m8_t test_vbrev8_v_u8m8_m(vbool1_t vm, vuint8m8_t vs2, size_t vl) { + return __riscv_vbrev8_v_u8m8_m(vm, vs2, vl); +} + +vuint16mf4_t test_vbrev8_v_u16mf4_m(vbool64_t vm, vuint16mf4_t vs2, size_t vl) { + return __riscv_vbrev8_v_u16mf4_m(vm, vs2, vl); +} + +vuint16mf2_t test_vbrev8_v_u16mf2_m(vbool32_t vm, vuint16mf2_t vs2, size_t vl) { + return __riscv_vbrev8_v_u16mf2_m(vm, vs2, vl); +} + +vuint16m1_t test_vbrev8_v_u16m1_m(vbool16_t vm, vuint16m1_t vs2, size_t vl) { + return __riscv_vbrev8_v_u16m1_m(vm, vs2, vl); +} + +vuint16m2_t test_vbrev8_v_u16m2_m(vbool8_t vm, vuint16m2_t vs2, size_t vl) { + return __riscv_vbrev8_v_u16m2_m(vm, vs2, vl); +} + +vuint16m4_t test_vbrev8_v_u16m4_m(vbool4_t vm, vuint16m4_t vs2, size_t vl) { + return __riscv_vbrev8_v_u16m4_m(vm, vs2, vl); +} + +vuint16m8_t test_vbrev8_v_u16m8_m(vbool2_t vm, vuint16m8_t vs2, size_t vl) { + return __riscv_vbrev8_v_u16m8_m(vm, vs2, vl); +} + +vuint32mf2_t test_vbrev8_v_u32mf2_m(vbool64_t vm, vuint32mf2_t vs2, size_t vl) { + return __riscv_vbrev8_v_u32mf2_m(vm, vs2, vl); +} + +vuint32m1_t test_vbrev8_v_u32m1_m(vbool32_t vm, vuint32m1_t vs2, size_t vl) { + return __riscv_vbrev8_v_u32m1_m(vm, vs2, vl); +} + +vuint32m2_t test_vbrev8_v_u32m2_m(vbool16_t vm, vuint32m2_t vs2, size_t vl) { + return __riscv_vbrev8_v_u32m2_m(vm, vs2, vl); +} + +vuint32m4_t test_vbrev8_v_u32m4_m(vbool8_t vm, vuint32m4_t vs2, size_t vl) { + return __riscv_vbrev8_v_u32m4_m(vm, vs2, vl); 
+} + +vuint32m8_t test_vbrev8_v_u32m8_m(vbool4_t vm, vuint32m8_t vs2, size_t vl) { + return __riscv_vbrev8_v_u32m8_m(vm, vs2, vl); +} + +vuint64m1_t test_vbrev8_v_u64m1_m(vbool64_t vm, vuint64m1_t vs2, size_t vl) { + return __riscv_vbrev8_v_u64m1_m(vm, vs2, vl); +} + +vuint64m2_t test_vbrev8_v_u64m2_m(vbool32_t vm, vuint64m2_t vs2, size_t vl) { + return __riscv_vbrev8_v_u64m2_m(vm, vs2, vl); +} + +vuint64m4_t test_vbrev8_v_u64m4_m(vbool16_t vm, vuint64m4_t vs2, size_t vl) { + return __riscv_vbrev8_v_u64m4_m(vm, vs2, vl); +} + +vuint64m8_t test_vbrev8_v_u64m8_m(vbool8_t vm, vuint64m8_t vs2, size_t vl) { + return __riscv_vbrev8_v_u64m8_m(vm, vs2, vl); +} diff --git a/auto-generated/vector-crypto/api-testing/vclmul.c b/auto-generated/vector-crypto/api-testing/vclmul.c new file mode 100644 index 000000000..3fd21fa7f --- /dev/null +++ b/auto-generated/vector-crypto/api-testing/vclmul.c @@ -0,0 +1,66 @@ +#include +#include + +vuint64m1_t test_vclmul_vv_u64m1(vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vclmul_vv_u64m1(vs2, vs1, vl); +} + +vuint64m1_t test_vclmul_vx_u64m1(vuint64m1_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vclmul_vx_u64m1(vs2, rs1, vl); +} + +vuint64m2_t test_vclmul_vv_u64m2(vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { + return __riscv_vclmul_vv_u64m2(vs2, vs1, vl); +} + +vuint64m2_t test_vclmul_vx_u64m2(vuint64m2_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vclmul_vx_u64m2(vs2, rs1, vl); +} + +vuint64m4_t test_vclmul_vv_u64m4(vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { + return __riscv_vclmul_vv_u64m4(vs2, vs1, vl); +} + +vuint64m4_t test_vclmul_vx_u64m4(vuint64m4_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vclmul_vx_u64m4(vs2, rs1, vl); +} + +vuint64m8_t test_vclmul_vv_u64m8(vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { + return __riscv_vclmul_vv_u64m8(vs2, vs1, vl); +} + +vuint64m8_t test_vclmul_vx_u64m8(vuint64m8_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vclmul_vx_u64m8(vs2, rs1, vl); +} + +vuint64m1_t test_vclmul_vv_u64m1_m(vbool64_t vm, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vclmul_vv_u64m1_m(vm, vs2, vs1, vl); +} + +vuint64m1_t test_vclmul_vx_u64m1_m(vbool64_t vm, vuint64m1_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vclmul_vx_u64m1_m(vm, vs2, rs1, vl); +} + +vuint64m2_t test_vclmul_vv_u64m2_m(vbool32_t vm, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { + return __riscv_vclmul_vv_u64m2_m(vm, vs2, vs1, vl); +} + +vuint64m2_t test_vclmul_vx_u64m2_m(vbool32_t vm, vuint64m2_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vclmul_vx_u64m2_m(vm, vs2, rs1, vl); +} + +vuint64m4_t test_vclmul_vv_u64m4_m(vbool16_t vm, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { + return __riscv_vclmul_vv_u64m4_m(vm, vs2, vs1, vl); +} + +vuint64m4_t test_vclmul_vx_u64m4_m(vbool16_t vm, vuint64m4_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vclmul_vx_u64m4_m(vm, vs2, rs1, vl); +} + +vuint64m8_t test_vclmul_vv_u64m8_m(vbool8_t vm, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { + return __riscv_vclmul_vv_u64m8_m(vm, vs2, vs1, vl); +} + +vuint64m8_t test_vclmul_vx_u64m8_m(vbool8_t vm, vuint64m8_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vclmul_vx_u64m8_m(vm, vs2, rs1, vl); +} diff --git a/auto-generated/vector-crypto/api-testing/vclmulh.c b/auto-generated/vector-crypto/api-testing/vclmulh.c new file mode 100644 index 000000000..a4c69311e --- /dev/null +++ b/auto-generated/vector-crypto/api-testing/vclmulh.c @@ -0,0 +1,66 @@ +#include +#include + +vuint64m1_t test_vclmulh_vv_u64m1(vuint64m1_t vs2, 
vuint64m1_t vs1, size_t vl) { + return __riscv_vclmulh_vv_u64m1(vs2, vs1, vl); +} + +vuint64m1_t test_vclmulh_vx_u64m1(vuint64m1_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vclmulh_vx_u64m1(vs2, rs1, vl); +} + +vuint64m2_t test_vclmulh_vv_u64m2(vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { + return __riscv_vclmulh_vv_u64m2(vs2, vs1, vl); +} + +vuint64m2_t test_vclmulh_vx_u64m2(vuint64m2_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vclmulh_vx_u64m2(vs2, rs1, vl); +} + +vuint64m4_t test_vclmulh_vv_u64m4(vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { + return __riscv_vclmulh_vv_u64m4(vs2, vs1, vl); +} + +vuint64m4_t test_vclmulh_vx_u64m4(vuint64m4_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vclmulh_vx_u64m4(vs2, rs1, vl); +} + +vuint64m8_t test_vclmulh_vv_u64m8(vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { + return __riscv_vclmulh_vv_u64m8(vs2, vs1, vl); +} + +vuint64m8_t test_vclmulh_vx_u64m8(vuint64m8_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vclmulh_vx_u64m8(vs2, rs1, vl); +} + +vuint64m1_t test_vclmulh_vv_u64m1_m(vbool64_t vm, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vclmulh_vv_u64m1_m(vm, vs2, vs1, vl); +} + +vuint64m1_t test_vclmulh_vx_u64m1_m(vbool64_t vm, vuint64m1_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vclmulh_vx_u64m1_m(vm, vs2, rs1, vl); +} + +vuint64m2_t test_vclmulh_vv_u64m2_m(vbool32_t vm, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { + return __riscv_vclmulh_vv_u64m2_m(vm, vs2, vs1, vl); +} + +vuint64m2_t test_vclmulh_vx_u64m2_m(vbool32_t vm, vuint64m2_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vclmulh_vx_u64m2_m(vm, vs2, rs1, vl); +} + +vuint64m4_t test_vclmulh_vv_u64m4_m(vbool16_t vm, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { + return __riscv_vclmulh_vv_u64m4_m(vm, vs2, vs1, vl); +} + +vuint64m4_t test_vclmulh_vx_u64m4_m(vbool16_t vm, vuint64m4_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vclmulh_vx_u64m4_m(vm, vs2, rs1, vl); +} + +vuint64m8_t test_vclmulh_vv_u64m8_m(vbool8_t vm, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { + return __riscv_vclmulh_vv_u64m8_m(vm, vs2, vs1, vl); +} + +vuint64m8_t test_vclmulh_vx_u64m8_m(vbool8_t vm, vuint64m8_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vclmulh_vx_u64m8_m(vm, vs2, rs1, vl); +} diff --git a/auto-generated/vector-crypto/api-testing/vclz.c b/auto-generated/vector-crypto/api-testing/vclz.c new file mode 100644 index 000000000..1fa92a927 --- /dev/null +++ b/auto-generated/vector-crypto/api-testing/vclz.c @@ -0,0 +1,178 @@ +#include +#include + +vuint8mf8_t test_vclz_v_u8mf8(vuint8mf8_t vs2, size_t vl) { + return __riscv_vclz_v_u8mf8(vs2, vl); +} + +vuint8mf4_t test_vclz_v_u8mf4(vuint8mf4_t vs2, size_t vl) { + return __riscv_vclz_v_u8mf4(vs2, vl); +} + +vuint8mf2_t test_vclz_v_u8mf2(vuint8mf2_t vs2, size_t vl) { + return __riscv_vclz_v_u8mf2(vs2, vl); +} + +vuint8m1_t test_vclz_v_u8m1(vuint8m1_t vs2, size_t vl) { + return __riscv_vclz_v_u8m1(vs2, vl); +} + +vuint8m2_t test_vclz_v_u8m2(vuint8m2_t vs2, size_t vl) { + return __riscv_vclz_v_u8m2(vs2, vl); +} + +vuint8m4_t test_vclz_v_u8m4(vuint8m4_t vs2, size_t vl) { + return __riscv_vclz_v_u8m4(vs2, vl); +} + +vuint8m8_t test_vclz_v_u8m8(vuint8m8_t vs2, size_t vl) { + return __riscv_vclz_v_u8m8(vs2, vl); +} + +vuint16mf4_t test_vclz_v_u16mf4(vuint16mf4_t vs2, size_t vl) { + return __riscv_vclz_v_u16mf4(vs2, vl); +} + +vuint16mf2_t test_vclz_v_u16mf2(vuint16mf2_t vs2, size_t vl) { + return __riscv_vclz_v_u16mf2(vs2, vl); +} + +vuint16m1_t test_vclz_v_u16m1(vuint16m1_t vs2, size_t vl) { + return 
__riscv_vclz_v_u16m1(vs2, vl); +} + +vuint16m2_t test_vclz_v_u16m2(vuint16m2_t vs2, size_t vl) { + return __riscv_vclz_v_u16m2(vs2, vl); +} + +vuint16m4_t test_vclz_v_u16m4(vuint16m4_t vs2, size_t vl) { + return __riscv_vclz_v_u16m4(vs2, vl); +} + +vuint16m8_t test_vclz_v_u16m8(vuint16m8_t vs2, size_t vl) { + return __riscv_vclz_v_u16m8(vs2, vl); +} + +vuint32mf2_t test_vclz_v_u32mf2(vuint32mf2_t vs2, size_t vl) { + return __riscv_vclz_v_u32mf2(vs2, vl); +} + +vuint32m1_t test_vclz_v_u32m1(vuint32m1_t vs2, size_t vl) { + return __riscv_vclz_v_u32m1(vs2, vl); +} + +vuint32m2_t test_vclz_v_u32m2(vuint32m2_t vs2, size_t vl) { + return __riscv_vclz_v_u32m2(vs2, vl); +} + +vuint32m4_t test_vclz_v_u32m4(vuint32m4_t vs2, size_t vl) { + return __riscv_vclz_v_u32m4(vs2, vl); +} + +vuint32m8_t test_vclz_v_u32m8(vuint32m8_t vs2, size_t vl) { + return __riscv_vclz_v_u32m8(vs2, vl); +} + +vuint64m1_t test_vclz_v_u64m1(vuint64m1_t vs2, size_t vl) { + return __riscv_vclz_v_u64m1(vs2, vl); +} + +vuint64m2_t test_vclz_v_u64m2(vuint64m2_t vs2, size_t vl) { + return __riscv_vclz_v_u64m2(vs2, vl); +} + +vuint64m4_t test_vclz_v_u64m4(vuint64m4_t vs2, size_t vl) { + return __riscv_vclz_v_u64m4(vs2, vl); +} + +vuint64m8_t test_vclz_v_u64m8(vuint64m8_t vs2, size_t vl) { + return __riscv_vclz_v_u64m8(vs2, vl); +} + +vuint8mf8_t test_vclz_v_u8mf8_m(vbool64_t vm, vuint8mf8_t vs2, size_t vl) { + return __riscv_vclz_v_u8mf8_m(vm, vs2, vl); +} + +vuint8mf4_t test_vclz_v_u8mf4_m(vbool32_t vm, vuint8mf4_t vs2, size_t vl) { + return __riscv_vclz_v_u8mf4_m(vm, vs2, vl); +} + +vuint8mf2_t test_vclz_v_u8mf2_m(vbool16_t vm, vuint8mf2_t vs2, size_t vl) { + return __riscv_vclz_v_u8mf2_m(vm, vs2, vl); +} + +vuint8m1_t test_vclz_v_u8m1_m(vbool8_t vm, vuint8m1_t vs2, size_t vl) { + return __riscv_vclz_v_u8m1_m(vm, vs2, vl); +} + +vuint8m2_t test_vclz_v_u8m2_m(vbool4_t vm, vuint8m2_t vs2, size_t vl) { + return __riscv_vclz_v_u8m2_m(vm, vs2, vl); +} + +vuint8m4_t test_vclz_v_u8m4_m(vbool2_t vm, vuint8m4_t vs2, size_t vl) { + return __riscv_vclz_v_u8m4_m(vm, vs2, vl); +} + +vuint8m8_t test_vclz_v_u8m8_m(vbool1_t vm, vuint8m8_t vs2, size_t vl) { + return __riscv_vclz_v_u8m8_m(vm, vs2, vl); +} + +vuint16mf4_t test_vclz_v_u16mf4_m(vbool64_t vm, vuint16mf4_t vs2, size_t vl) { + return __riscv_vclz_v_u16mf4_m(vm, vs2, vl); +} + +vuint16mf2_t test_vclz_v_u16mf2_m(vbool32_t vm, vuint16mf2_t vs2, size_t vl) { + return __riscv_vclz_v_u16mf2_m(vm, vs2, vl); +} + +vuint16m1_t test_vclz_v_u16m1_m(vbool16_t vm, vuint16m1_t vs2, size_t vl) { + return __riscv_vclz_v_u16m1_m(vm, vs2, vl); +} + +vuint16m2_t test_vclz_v_u16m2_m(vbool8_t vm, vuint16m2_t vs2, size_t vl) { + return __riscv_vclz_v_u16m2_m(vm, vs2, vl); +} + +vuint16m4_t test_vclz_v_u16m4_m(vbool4_t vm, vuint16m4_t vs2, size_t vl) { + return __riscv_vclz_v_u16m4_m(vm, vs2, vl); +} + +vuint16m8_t test_vclz_v_u16m8_m(vbool2_t vm, vuint16m8_t vs2, size_t vl) { + return __riscv_vclz_v_u16m8_m(vm, vs2, vl); +} + +vuint32mf2_t test_vclz_v_u32mf2_m(vbool64_t vm, vuint32mf2_t vs2, size_t vl) { + return __riscv_vclz_v_u32mf2_m(vm, vs2, vl); +} + +vuint32m1_t test_vclz_v_u32m1_m(vbool32_t vm, vuint32m1_t vs2, size_t vl) { + return __riscv_vclz_v_u32m1_m(vm, vs2, vl); +} + +vuint32m2_t test_vclz_v_u32m2_m(vbool16_t vm, vuint32m2_t vs2, size_t vl) { + return __riscv_vclz_v_u32m2_m(vm, vs2, vl); +} + +vuint32m4_t test_vclz_v_u32m4_m(vbool8_t vm, vuint32m4_t vs2, size_t vl) { + return __riscv_vclz_v_u32m4_m(vm, vs2, vl); +} + +vuint32m8_t test_vclz_v_u32m8_m(vbool4_t vm, vuint32m8_t vs2, size_t vl) { + 
return __riscv_vclz_v_u32m8_m(vm, vs2, vl); +} + +vuint64m1_t test_vclz_v_u64m1_m(vbool64_t vm, vuint64m1_t vs2, size_t vl) { + return __riscv_vclz_v_u64m1_m(vm, vs2, vl); +} + +vuint64m2_t test_vclz_v_u64m2_m(vbool32_t vm, vuint64m2_t vs2, size_t vl) { + return __riscv_vclz_v_u64m2_m(vm, vs2, vl); +} + +vuint64m4_t test_vclz_v_u64m4_m(vbool16_t vm, vuint64m4_t vs2, size_t vl) { + return __riscv_vclz_v_u64m4_m(vm, vs2, vl); +} + +vuint64m8_t test_vclz_v_u64m8_m(vbool8_t vm, vuint64m8_t vs2, size_t vl) { + return __riscv_vclz_v_u64m8_m(vm, vs2, vl); +} diff --git a/auto-generated/vector-crypto/api-testing/vcpop.c b/auto-generated/vector-crypto/api-testing/vcpop.c new file mode 100644 index 000000000..d3c52d8fd --- /dev/null +++ b/auto-generated/vector-crypto/api-testing/vcpop.c @@ -0,0 +1,178 @@ +#include +#include + +vuint8mf8_t test_vcpop_v_u8mf8(vuint8mf8_t vs2, size_t vl) { + return __riscv_vcpop_v_u8mf8(vs2, vl); +} + +vuint8mf4_t test_vcpop_v_u8mf4(vuint8mf4_t vs2, size_t vl) { + return __riscv_vcpop_v_u8mf4(vs2, vl); +} + +vuint8mf2_t test_vcpop_v_u8mf2(vuint8mf2_t vs2, size_t vl) { + return __riscv_vcpop_v_u8mf2(vs2, vl); +} + +vuint8m1_t test_vcpop_v_u8m1(vuint8m1_t vs2, size_t vl) { + return __riscv_vcpop_v_u8m1(vs2, vl); +} + +vuint8m2_t test_vcpop_v_u8m2(vuint8m2_t vs2, size_t vl) { + return __riscv_vcpop_v_u8m2(vs2, vl); +} + +vuint8m4_t test_vcpop_v_u8m4(vuint8m4_t vs2, size_t vl) { + return __riscv_vcpop_v_u8m4(vs2, vl); +} + +vuint8m8_t test_vcpop_v_u8m8(vuint8m8_t vs2, size_t vl) { + return __riscv_vcpop_v_u8m8(vs2, vl); +} + +vuint16mf4_t test_vcpop_v_u16mf4(vuint16mf4_t vs2, size_t vl) { + return __riscv_vcpop_v_u16mf4(vs2, vl); +} + +vuint16mf2_t test_vcpop_v_u16mf2(vuint16mf2_t vs2, size_t vl) { + return __riscv_vcpop_v_u16mf2(vs2, vl); +} + +vuint16m1_t test_vcpop_v_u16m1(vuint16m1_t vs2, size_t vl) { + return __riscv_vcpop_v_u16m1(vs2, vl); +} + +vuint16m2_t test_vcpop_v_u16m2(vuint16m2_t vs2, size_t vl) { + return __riscv_vcpop_v_u16m2(vs2, vl); +} + +vuint16m4_t test_vcpop_v_u16m4(vuint16m4_t vs2, size_t vl) { + return __riscv_vcpop_v_u16m4(vs2, vl); +} + +vuint16m8_t test_vcpop_v_u16m8(vuint16m8_t vs2, size_t vl) { + return __riscv_vcpop_v_u16m8(vs2, vl); +} + +vuint32mf2_t test_vcpop_v_u32mf2(vuint32mf2_t vs2, size_t vl) { + return __riscv_vcpop_v_u32mf2(vs2, vl); +} + +vuint32m1_t test_vcpop_v_u32m1(vuint32m1_t vs2, size_t vl) { + return __riscv_vcpop_v_u32m1(vs2, vl); +} + +vuint32m2_t test_vcpop_v_u32m2(vuint32m2_t vs2, size_t vl) { + return __riscv_vcpop_v_u32m2(vs2, vl); +} + +vuint32m4_t test_vcpop_v_u32m4(vuint32m4_t vs2, size_t vl) { + return __riscv_vcpop_v_u32m4(vs2, vl); +} + +vuint32m8_t test_vcpop_v_u32m8(vuint32m8_t vs2, size_t vl) { + return __riscv_vcpop_v_u32m8(vs2, vl); +} + +vuint64m1_t test_vcpop_v_u64m1(vuint64m1_t vs2, size_t vl) { + return __riscv_vcpop_v_u64m1(vs2, vl); +} + +vuint64m2_t test_vcpop_v_u64m2(vuint64m2_t vs2, size_t vl) { + return __riscv_vcpop_v_u64m2(vs2, vl); +} + +vuint64m4_t test_vcpop_v_u64m4(vuint64m4_t vs2, size_t vl) { + return __riscv_vcpop_v_u64m4(vs2, vl); +} + +vuint64m8_t test_vcpop_v_u64m8(vuint64m8_t vs2, size_t vl) { + return __riscv_vcpop_v_u64m8(vs2, vl); +} + +vuint8mf8_t test_vcpop_v_u8mf8_m(vbool64_t vm, vuint8mf8_t vs2, size_t vl) { + return __riscv_vcpop_v_u8mf8_m(vm, vs2, vl); +} + +vuint8mf4_t test_vcpop_v_u8mf4_m(vbool32_t vm, vuint8mf4_t vs2, size_t vl) { + return __riscv_vcpop_v_u8mf4_m(vm, vs2, vl); +} + +vuint8mf2_t test_vcpop_v_u8mf2_m(vbool16_t vm, vuint8mf2_t vs2, size_t vl) { + return 
__riscv_vcpop_v_u8mf2_m(vm, vs2, vl); +} + +vuint8m1_t test_vcpop_v_u8m1_m(vbool8_t vm, vuint8m1_t vs2, size_t vl) { + return __riscv_vcpop_v_u8m1_m(vm, vs2, vl); +} + +vuint8m2_t test_vcpop_v_u8m2_m(vbool4_t vm, vuint8m2_t vs2, size_t vl) { + return __riscv_vcpop_v_u8m2_m(vm, vs2, vl); +} + +vuint8m4_t test_vcpop_v_u8m4_m(vbool2_t vm, vuint8m4_t vs2, size_t vl) { + return __riscv_vcpop_v_u8m4_m(vm, vs2, vl); +} + +vuint8m8_t test_vcpop_v_u8m8_m(vbool1_t vm, vuint8m8_t vs2, size_t vl) { + return __riscv_vcpop_v_u8m8_m(vm, vs2, vl); +} + +vuint16mf4_t test_vcpop_v_u16mf4_m(vbool64_t vm, vuint16mf4_t vs2, size_t vl) { + return __riscv_vcpop_v_u16mf4_m(vm, vs2, vl); +} + +vuint16mf2_t test_vcpop_v_u16mf2_m(vbool32_t vm, vuint16mf2_t vs2, size_t vl) { + return __riscv_vcpop_v_u16mf2_m(vm, vs2, vl); +} + +vuint16m1_t test_vcpop_v_u16m1_m(vbool16_t vm, vuint16m1_t vs2, size_t vl) { + return __riscv_vcpop_v_u16m1_m(vm, vs2, vl); +} + +vuint16m2_t test_vcpop_v_u16m2_m(vbool8_t vm, vuint16m2_t vs2, size_t vl) { + return __riscv_vcpop_v_u16m2_m(vm, vs2, vl); +} + +vuint16m4_t test_vcpop_v_u16m4_m(vbool4_t vm, vuint16m4_t vs2, size_t vl) { + return __riscv_vcpop_v_u16m4_m(vm, vs2, vl); +} + +vuint16m8_t test_vcpop_v_u16m8_m(vbool2_t vm, vuint16m8_t vs2, size_t vl) { + return __riscv_vcpop_v_u16m8_m(vm, vs2, vl); +} + +vuint32mf2_t test_vcpop_v_u32mf2_m(vbool64_t vm, vuint32mf2_t vs2, size_t vl) { + return __riscv_vcpop_v_u32mf2_m(vm, vs2, vl); +} + +vuint32m1_t test_vcpop_v_u32m1_m(vbool32_t vm, vuint32m1_t vs2, size_t vl) { + return __riscv_vcpop_v_u32m1_m(vm, vs2, vl); +} + +vuint32m2_t test_vcpop_v_u32m2_m(vbool16_t vm, vuint32m2_t vs2, size_t vl) { + return __riscv_vcpop_v_u32m2_m(vm, vs2, vl); +} + +vuint32m4_t test_vcpop_v_u32m4_m(vbool8_t vm, vuint32m4_t vs2, size_t vl) { + return __riscv_vcpop_v_u32m4_m(vm, vs2, vl); +} + +vuint32m8_t test_vcpop_v_u32m8_m(vbool4_t vm, vuint32m8_t vs2, size_t vl) { + return __riscv_vcpop_v_u32m8_m(vm, vs2, vl); +} + +vuint64m1_t test_vcpop_v_u64m1_m(vbool64_t vm, vuint64m1_t vs2, size_t vl) { + return __riscv_vcpop_v_u64m1_m(vm, vs2, vl); +} + +vuint64m2_t test_vcpop_v_u64m2_m(vbool32_t vm, vuint64m2_t vs2, size_t vl) { + return __riscv_vcpop_v_u64m2_m(vm, vs2, vl); +} + +vuint64m4_t test_vcpop_v_u64m4_m(vbool16_t vm, vuint64m4_t vs2, size_t vl) { + return __riscv_vcpop_v_u64m4_m(vm, vs2, vl); +} + +vuint64m8_t test_vcpop_v_u64m8_m(vbool8_t vm, vuint64m8_t vs2, size_t vl) { + return __riscv_vcpop_v_u64m8_m(vm, vs2, vl); +} diff --git a/auto-generated/vector-crypto/api-testing/vctz.c b/auto-generated/vector-crypto/api-testing/vctz.c new file mode 100644 index 000000000..eadb46e90 --- /dev/null +++ b/auto-generated/vector-crypto/api-testing/vctz.c @@ -0,0 +1,178 @@ +#include +#include + +vuint8mf8_t test_vctz_v_u8mf8(vuint8mf8_t vs2, size_t vl) { + return __riscv_vctz_v_u8mf8(vs2, vl); +} + +vuint8mf4_t test_vctz_v_u8mf4(vuint8mf4_t vs2, size_t vl) { + return __riscv_vctz_v_u8mf4(vs2, vl); +} + +vuint8mf2_t test_vctz_v_u8mf2(vuint8mf2_t vs2, size_t vl) { + return __riscv_vctz_v_u8mf2(vs2, vl); +} + +vuint8m1_t test_vctz_v_u8m1(vuint8m1_t vs2, size_t vl) { + return __riscv_vctz_v_u8m1(vs2, vl); +} + +vuint8m2_t test_vctz_v_u8m2(vuint8m2_t vs2, size_t vl) { + return __riscv_vctz_v_u8m2(vs2, vl); +} + +vuint8m4_t test_vctz_v_u8m4(vuint8m4_t vs2, size_t vl) { + return __riscv_vctz_v_u8m4(vs2, vl); +} + +vuint8m8_t test_vctz_v_u8m8(vuint8m8_t vs2, size_t vl) { + return __riscv_vctz_v_u8m8(vs2, vl); +} + +vuint16mf4_t test_vctz_v_u16mf4(vuint16mf4_t vs2, size_t vl) { 
+ return __riscv_vctz_v_u16mf4(vs2, vl); +} + +vuint16mf2_t test_vctz_v_u16mf2(vuint16mf2_t vs2, size_t vl) { + return __riscv_vctz_v_u16mf2(vs2, vl); +} + +vuint16m1_t test_vctz_v_u16m1(vuint16m1_t vs2, size_t vl) { + return __riscv_vctz_v_u16m1(vs2, vl); +} + +vuint16m2_t test_vctz_v_u16m2(vuint16m2_t vs2, size_t vl) { + return __riscv_vctz_v_u16m2(vs2, vl); +} + +vuint16m4_t test_vctz_v_u16m4(vuint16m4_t vs2, size_t vl) { + return __riscv_vctz_v_u16m4(vs2, vl); +} + +vuint16m8_t test_vctz_v_u16m8(vuint16m8_t vs2, size_t vl) { + return __riscv_vctz_v_u16m8(vs2, vl); +} + +vuint32mf2_t test_vctz_v_u32mf2(vuint32mf2_t vs2, size_t vl) { + return __riscv_vctz_v_u32mf2(vs2, vl); +} + +vuint32m1_t test_vctz_v_u32m1(vuint32m1_t vs2, size_t vl) { + return __riscv_vctz_v_u32m1(vs2, vl); +} + +vuint32m2_t test_vctz_v_u32m2(vuint32m2_t vs2, size_t vl) { + return __riscv_vctz_v_u32m2(vs2, vl); +} + +vuint32m4_t test_vctz_v_u32m4(vuint32m4_t vs2, size_t vl) { + return __riscv_vctz_v_u32m4(vs2, vl); +} + +vuint32m8_t test_vctz_v_u32m8(vuint32m8_t vs2, size_t vl) { + return __riscv_vctz_v_u32m8(vs2, vl); +} + +vuint64m1_t test_vctz_v_u64m1(vuint64m1_t vs2, size_t vl) { + return __riscv_vctz_v_u64m1(vs2, vl); +} + +vuint64m2_t test_vctz_v_u64m2(vuint64m2_t vs2, size_t vl) { + return __riscv_vctz_v_u64m2(vs2, vl); +} + +vuint64m4_t test_vctz_v_u64m4(vuint64m4_t vs2, size_t vl) { + return __riscv_vctz_v_u64m4(vs2, vl); +} + +vuint64m8_t test_vctz_v_u64m8(vuint64m8_t vs2, size_t vl) { + return __riscv_vctz_v_u64m8(vs2, vl); +} + +vuint8mf8_t test_vctz_v_u8mf8_m(vbool64_t vm, vuint8mf8_t vs2, size_t vl) { + return __riscv_vctz_v_u8mf8_m(vm, vs2, vl); +} + +vuint8mf4_t test_vctz_v_u8mf4_m(vbool32_t vm, vuint8mf4_t vs2, size_t vl) { + return __riscv_vctz_v_u8mf4_m(vm, vs2, vl); +} + +vuint8mf2_t test_vctz_v_u8mf2_m(vbool16_t vm, vuint8mf2_t vs2, size_t vl) { + return __riscv_vctz_v_u8mf2_m(vm, vs2, vl); +} + +vuint8m1_t test_vctz_v_u8m1_m(vbool8_t vm, vuint8m1_t vs2, size_t vl) { + return __riscv_vctz_v_u8m1_m(vm, vs2, vl); +} + +vuint8m2_t test_vctz_v_u8m2_m(vbool4_t vm, vuint8m2_t vs2, size_t vl) { + return __riscv_vctz_v_u8m2_m(vm, vs2, vl); +} + +vuint8m4_t test_vctz_v_u8m4_m(vbool2_t vm, vuint8m4_t vs2, size_t vl) { + return __riscv_vctz_v_u8m4_m(vm, vs2, vl); +} + +vuint8m8_t test_vctz_v_u8m8_m(vbool1_t vm, vuint8m8_t vs2, size_t vl) { + return __riscv_vctz_v_u8m8_m(vm, vs2, vl); +} + +vuint16mf4_t test_vctz_v_u16mf4_m(vbool64_t vm, vuint16mf4_t vs2, size_t vl) { + return __riscv_vctz_v_u16mf4_m(vm, vs2, vl); +} + +vuint16mf2_t test_vctz_v_u16mf2_m(vbool32_t vm, vuint16mf2_t vs2, size_t vl) { + return __riscv_vctz_v_u16mf2_m(vm, vs2, vl); +} + +vuint16m1_t test_vctz_v_u16m1_m(vbool16_t vm, vuint16m1_t vs2, size_t vl) { + return __riscv_vctz_v_u16m1_m(vm, vs2, vl); +} + +vuint16m2_t test_vctz_v_u16m2_m(vbool8_t vm, vuint16m2_t vs2, size_t vl) { + return __riscv_vctz_v_u16m2_m(vm, vs2, vl); +} + +vuint16m4_t test_vctz_v_u16m4_m(vbool4_t vm, vuint16m4_t vs2, size_t vl) { + return __riscv_vctz_v_u16m4_m(vm, vs2, vl); +} + +vuint16m8_t test_vctz_v_u16m8_m(vbool2_t vm, vuint16m8_t vs2, size_t vl) { + return __riscv_vctz_v_u16m8_m(vm, vs2, vl); +} + +vuint32mf2_t test_vctz_v_u32mf2_m(vbool64_t vm, vuint32mf2_t vs2, size_t vl) { + return __riscv_vctz_v_u32mf2_m(vm, vs2, vl); +} + +vuint32m1_t test_vctz_v_u32m1_m(vbool32_t vm, vuint32m1_t vs2, size_t vl) { + return __riscv_vctz_v_u32m1_m(vm, vs2, vl); +} + +vuint32m2_t test_vctz_v_u32m2_m(vbool16_t vm, vuint32m2_t vs2, size_t vl) { + return 
__riscv_vctz_v_u32m2_m(vm, vs2, vl); +} + +vuint32m4_t test_vctz_v_u32m4_m(vbool8_t vm, vuint32m4_t vs2, size_t vl) { + return __riscv_vctz_v_u32m4_m(vm, vs2, vl); +} + +vuint32m8_t test_vctz_v_u32m8_m(vbool4_t vm, vuint32m8_t vs2, size_t vl) { + return __riscv_vctz_v_u32m8_m(vm, vs2, vl); +} + +vuint64m1_t test_vctz_v_u64m1_m(vbool64_t vm, vuint64m1_t vs2, size_t vl) { + return __riscv_vctz_v_u64m1_m(vm, vs2, vl); +} + +vuint64m2_t test_vctz_v_u64m2_m(vbool32_t vm, vuint64m2_t vs2, size_t vl) { + return __riscv_vctz_v_u64m2_m(vm, vs2, vl); +} + +vuint64m4_t test_vctz_v_u64m4_m(vbool16_t vm, vuint64m4_t vs2, size_t vl) { + return __riscv_vctz_v_u64m4_m(vm, vs2, vl); +} + +vuint64m8_t test_vctz_v_u64m8_m(vbool8_t vm, vuint64m8_t vs2, size_t vl) { + return __riscv_vctz_v_u64m8_m(vm, vs2, vl); +} diff --git a/auto-generated/vector-crypto/api-testing/vghsh.c b/auto-generated/vector-crypto/api-testing/vghsh.c new file mode 100644 index 000000000..accbf01e5 --- /dev/null +++ b/auto-generated/vector-crypto/api-testing/vghsh.c @@ -0,0 +1,22 @@ +#include +#include + +vuint32mf2_t test_vghsh_vv_u32mf2(vuint32mf2_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { + return __riscv_vghsh_vv_u32mf2(vd, vs2, vs1, vl); +} + +vuint32m1_t test_vghsh_vv_u32m1(vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vghsh_vv_u32m1(vd, vs2, vs1, vl); +} + +vuint32m2_t test_vghsh_vv_u32m2(vuint32m2_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { + return __riscv_vghsh_vv_u32m2(vd, vs2, vs1, vl); +} + +vuint32m4_t test_vghsh_vv_u32m4(vuint32m4_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { + return __riscv_vghsh_vv_u32m4(vd, vs2, vs1, vl); +} + +vuint32m8_t test_vghsh_vv_u32m8(vuint32m8_t vd, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) { + return __riscv_vghsh_vv_u32m8(vd, vs2, vs1, vl); +} diff --git a/auto-generated/vector-crypto/api-testing/vgmul.c b/auto-generated/vector-crypto/api-testing/vgmul.c new file mode 100644 index 000000000..4d9028a54 --- /dev/null +++ b/auto-generated/vector-crypto/api-testing/vgmul.c @@ -0,0 +1,22 @@ +#include +#include + +vuint32mf2_t test_vgmul_vv_u32mf2(vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) { + return __riscv_vgmul_vv_u32mf2(vd, vs2, vl); +} + +vuint32m1_t test_vgmul_vv_u32m1(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vgmul_vv_u32m1(vd, vs2, vl); +} + +vuint32m2_t test_vgmul_vv_u32m2(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vgmul_vv_u32m2(vd, vs2, vl); +} + +vuint32m4_t test_vgmul_vv_u32m4(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vgmul_vv_u32m4(vd, vs2, vl); +} + +vuint32m8_t test_vgmul_vv_u32m8(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { + return __riscv_vgmul_vv_u32m8(vd, vs2, vl); +} diff --git a/auto-generated/vector-crypto/api-testing/vrev8.c b/auto-generated/vector-crypto/api-testing/vrev8.c new file mode 100644 index 000000000..c0b367a61 --- /dev/null +++ b/auto-generated/vector-crypto/api-testing/vrev8.c @@ -0,0 +1,178 @@ +#include +#include + +vuint8mf8_t test_vrev8_v_u8mf8(vuint8mf8_t vs2, size_t vl) { + return __riscv_vrev8_v_u8mf8(vs2, vl); +} + +vuint8mf4_t test_vrev8_v_u8mf4(vuint8mf4_t vs2, size_t vl) { + return __riscv_vrev8_v_u8mf4(vs2, vl); +} + +vuint8mf2_t test_vrev8_v_u8mf2(vuint8mf2_t vs2, size_t vl) { + return __riscv_vrev8_v_u8mf2(vs2, vl); +} + +vuint8m1_t test_vrev8_v_u8m1(vuint8m1_t vs2, size_t vl) { + return __riscv_vrev8_v_u8m1(vs2, vl); +} + +vuint8m2_t test_vrev8_v_u8m2(vuint8m2_t vs2, size_t vl) { + return __riscv_vrev8_v_u8m2(vs2, 
vl); +} + +vuint8m4_t test_vrev8_v_u8m4(vuint8m4_t vs2, size_t vl) { + return __riscv_vrev8_v_u8m4(vs2, vl); +} + +vuint8m8_t test_vrev8_v_u8m8(vuint8m8_t vs2, size_t vl) { + return __riscv_vrev8_v_u8m8(vs2, vl); +} + +vuint16mf4_t test_vrev8_v_u16mf4(vuint16mf4_t vs2, size_t vl) { + return __riscv_vrev8_v_u16mf4(vs2, vl); +} + +vuint16mf2_t test_vrev8_v_u16mf2(vuint16mf2_t vs2, size_t vl) { + return __riscv_vrev8_v_u16mf2(vs2, vl); +} + +vuint16m1_t test_vrev8_v_u16m1(vuint16m1_t vs2, size_t vl) { + return __riscv_vrev8_v_u16m1(vs2, vl); +} + +vuint16m2_t test_vrev8_v_u16m2(vuint16m2_t vs2, size_t vl) { + return __riscv_vrev8_v_u16m2(vs2, vl); +} + +vuint16m4_t test_vrev8_v_u16m4(vuint16m4_t vs2, size_t vl) { + return __riscv_vrev8_v_u16m4(vs2, vl); +} + +vuint16m8_t test_vrev8_v_u16m8(vuint16m8_t vs2, size_t vl) { + return __riscv_vrev8_v_u16m8(vs2, vl); +} + +vuint32mf2_t test_vrev8_v_u32mf2(vuint32mf2_t vs2, size_t vl) { + return __riscv_vrev8_v_u32mf2(vs2, vl); +} + +vuint32m1_t test_vrev8_v_u32m1(vuint32m1_t vs2, size_t vl) { + return __riscv_vrev8_v_u32m1(vs2, vl); +} + +vuint32m2_t test_vrev8_v_u32m2(vuint32m2_t vs2, size_t vl) { + return __riscv_vrev8_v_u32m2(vs2, vl); +} + +vuint32m4_t test_vrev8_v_u32m4(vuint32m4_t vs2, size_t vl) { + return __riscv_vrev8_v_u32m4(vs2, vl); +} + +vuint32m8_t test_vrev8_v_u32m8(vuint32m8_t vs2, size_t vl) { + return __riscv_vrev8_v_u32m8(vs2, vl); +} + +vuint64m1_t test_vrev8_v_u64m1(vuint64m1_t vs2, size_t vl) { + return __riscv_vrev8_v_u64m1(vs2, vl); +} + +vuint64m2_t test_vrev8_v_u64m2(vuint64m2_t vs2, size_t vl) { + return __riscv_vrev8_v_u64m2(vs2, vl); +} + +vuint64m4_t test_vrev8_v_u64m4(vuint64m4_t vs2, size_t vl) { + return __riscv_vrev8_v_u64m4(vs2, vl); +} + +vuint64m8_t test_vrev8_v_u64m8(vuint64m8_t vs2, size_t vl) { + return __riscv_vrev8_v_u64m8(vs2, vl); +} + +vuint8mf8_t test_vrev8_v_u8mf8_m(vbool64_t vm, vuint8mf8_t vs2, size_t vl) { + return __riscv_vrev8_v_u8mf8_m(vm, vs2, vl); +} + +vuint8mf4_t test_vrev8_v_u8mf4_m(vbool32_t vm, vuint8mf4_t vs2, size_t vl) { + return __riscv_vrev8_v_u8mf4_m(vm, vs2, vl); +} + +vuint8mf2_t test_vrev8_v_u8mf2_m(vbool16_t vm, vuint8mf2_t vs2, size_t vl) { + return __riscv_vrev8_v_u8mf2_m(vm, vs2, vl); +} + +vuint8m1_t test_vrev8_v_u8m1_m(vbool8_t vm, vuint8m1_t vs2, size_t vl) { + return __riscv_vrev8_v_u8m1_m(vm, vs2, vl); +} + +vuint8m2_t test_vrev8_v_u8m2_m(vbool4_t vm, vuint8m2_t vs2, size_t vl) { + return __riscv_vrev8_v_u8m2_m(vm, vs2, vl); +} + +vuint8m4_t test_vrev8_v_u8m4_m(vbool2_t vm, vuint8m4_t vs2, size_t vl) { + return __riscv_vrev8_v_u8m4_m(vm, vs2, vl); +} + +vuint8m8_t test_vrev8_v_u8m8_m(vbool1_t vm, vuint8m8_t vs2, size_t vl) { + return __riscv_vrev8_v_u8m8_m(vm, vs2, vl); +} + +vuint16mf4_t test_vrev8_v_u16mf4_m(vbool64_t vm, vuint16mf4_t vs2, size_t vl) { + return __riscv_vrev8_v_u16mf4_m(vm, vs2, vl); +} + +vuint16mf2_t test_vrev8_v_u16mf2_m(vbool32_t vm, vuint16mf2_t vs2, size_t vl) { + return __riscv_vrev8_v_u16mf2_m(vm, vs2, vl); +} + +vuint16m1_t test_vrev8_v_u16m1_m(vbool16_t vm, vuint16m1_t vs2, size_t vl) { + return __riscv_vrev8_v_u16m1_m(vm, vs2, vl); +} + +vuint16m2_t test_vrev8_v_u16m2_m(vbool8_t vm, vuint16m2_t vs2, size_t vl) { + return __riscv_vrev8_v_u16m2_m(vm, vs2, vl); +} + +vuint16m4_t test_vrev8_v_u16m4_m(vbool4_t vm, vuint16m4_t vs2, size_t vl) { + return __riscv_vrev8_v_u16m4_m(vm, vs2, vl); +} + +vuint16m8_t test_vrev8_v_u16m8_m(vbool2_t vm, vuint16m8_t vs2, size_t vl) { + return __riscv_vrev8_v_u16m8_m(vm, vs2, vl); +} + +vuint32mf2_t 
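// vrev8 (masked form): reverses the byte order within each element of vs2, under mask vm.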
test_vrev8_v_u32mf2_m(vbool64_t vm, vuint32mf2_t vs2, size_t vl) { + return __riscv_vrev8_v_u32mf2_m(vm, vs2, vl); +} + +vuint32m1_t test_vrev8_v_u32m1_m(vbool32_t vm, vuint32m1_t vs2, size_t vl) { + return __riscv_vrev8_v_u32m1_m(vm, vs2, vl); +} + +vuint32m2_t test_vrev8_v_u32m2_m(vbool16_t vm, vuint32m2_t vs2, size_t vl) { + return __riscv_vrev8_v_u32m2_m(vm, vs2, vl); +} + +vuint32m4_t test_vrev8_v_u32m4_m(vbool8_t vm, vuint32m4_t vs2, size_t vl) { + return __riscv_vrev8_v_u32m4_m(vm, vs2, vl); +} + +vuint32m8_t test_vrev8_v_u32m8_m(vbool4_t vm, vuint32m8_t vs2, size_t vl) { + return __riscv_vrev8_v_u32m8_m(vm, vs2, vl); +} + +vuint64m1_t test_vrev8_v_u64m1_m(vbool64_t vm, vuint64m1_t vs2, size_t vl) { + return __riscv_vrev8_v_u64m1_m(vm, vs2, vl); +} + +vuint64m2_t test_vrev8_v_u64m2_m(vbool32_t vm, vuint64m2_t vs2, size_t vl) { + return __riscv_vrev8_v_u64m2_m(vm, vs2, vl); +} + +vuint64m4_t test_vrev8_v_u64m4_m(vbool16_t vm, vuint64m4_t vs2, size_t vl) { + return __riscv_vrev8_v_u64m4_m(vm, vs2, vl); +} + +vuint64m8_t test_vrev8_v_u64m8_m(vbool8_t vm, vuint64m8_t vs2, size_t vl) { + return __riscv_vrev8_v_u64m8_m(vm, vs2, vl); +} diff --git a/auto-generated/vector-crypto/api-testing/vrol.c b/auto-generated/vector-crypto/api-testing/vrol.c new file mode 100644 index 000000000..f4ee9ffbb --- /dev/null +++ b/auto-generated/vector-crypto/api-testing/vrol.c @@ -0,0 +1,354 @@ +#include +#include + +vuint8mf8_t test_vrol_vv_u8mf8(vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { + return __riscv_vrol_vv_u8mf8(vs2, vs1, vl); +} + +vuint8mf8_t test_vrol_vx_u8mf8(vuint8mf8_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u8mf8(vs2, rs1, vl); +} + +vuint8mf4_t test_vrol_vv_u8mf4(vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { + return __riscv_vrol_vv_u8mf4(vs2, vs1, vl); +} + +vuint8mf4_t test_vrol_vx_u8mf4(vuint8mf4_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u8mf4(vs2, rs1, vl); +} + +vuint8mf2_t test_vrol_vv_u8mf2(vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { + return __riscv_vrol_vv_u8mf2(vs2, vs1, vl); +} + +vuint8mf2_t test_vrol_vx_u8mf2(vuint8mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u8mf2(vs2, rs1, vl); +} + +vuint8m1_t test_vrol_vv_u8m1(vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vrol_vv_u8m1(vs2, vs1, vl); +} + +vuint8m1_t test_vrol_vx_u8m1(vuint8m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u8m1(vs2, rs1, vl); +} + +vuint8m2_t test_vrol_vv_u8m2(vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) { + return __riscv_vrol_vv_u8m2(vs2, vs1, vl); +} + +vuint8m2_t test_vrol_vx_u8m2(vuint8m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u8m2(vs2, rs1, vl); +} + +vuint8m4_t test_vrol_vv_u8m4(vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) { + return __riscv_vrol_vv_u8m4(vs2, vs1, vl); +} + +vuint8m4_t test_vrol_vx_u8m4(vuint8m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u8m4(vs2, rs1, vl); +} + +vuint8m8_t test_vrol_vv_u8m8(vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) { + return __riscv_vrol_vv_u8m8(vs2, vs1, vl); +} + +vuint8m8_t test_vrol_vx_u8m8(vuint8m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u8m8(vs2, rs1, vl); +} + +vuint16mf4_t test_vrol_vv_u16mf4(vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { + return __riscv_vrol_vv_u16mf4(vs2, vs1, vl); +} + +vuint16mf4_t test_vrol_vx_u16mf4(vuint16mf4_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u16mf4(vs2, rs1, vl); +} + +vuint16mf2_t test_vrol_vv_u16mf2(vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { + return __riscv_vrol_vv_u16mf2(vs2, 
vs1, vl); +} + +vuint16mf2_t test_vrol_vx_u16mf2(vuint16mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u16mf2(vs2, rs1, vl); +} + +vuint16m1_t test_vrol_vv_u16m1(vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vrol_vv_u16m1(vs2, vs1, vl); +} + +vuint16m1_t test_vrol_vx_u16m1(vuint16m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u16m1(vs2, rs1, vl); +} + +vuint16m2_t test_vrol_vv_u16m2(vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { + return __riscv_vrol_vv_u16m2(vs2, vs1, vl); +} + +vuint16m2_t test_vrol_vx_u16m2(vuint16m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u16m2(vs2, rs1, vl); +} + +vuint16m4_t test_vrol_vv_u16m4(vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { + return __riscv_vrol_vv_u16m4(vs2, vs1, vl); +} + +vuint16m4_t test_vrol_vx_u16m4(vuint16m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u16m4(vs2, rs1, vl); +} + +vuint16m8_t test_vrol_vv_u16m8(vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) { + return __riscv_vrol_vv_u16m8(vs2, vs1, vl); +} + +vuint16m8_t test_vrol_vx_u16m8(vuint16m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u16m8(vs2, rs1, vl); +} + +vuint32mf2_t test_vrol_vv_u32mf2(vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { + return __riscv_vrol_vv_u32mf2(vs2, vs1, vl); +} + +vuint32mf2_t test_vrol_vx_u32mf2(vuint32mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u32mf2(vs2, rs1, vl); +} + +vuint32m1_t test_vrol_vv_u32m1(vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vrol_vv_u32m1(vs2, vs1, vl); +} + +vuint32m1_t test_vrol_vx_u32m1(vuint32m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u32m1(vs2, rs1, vl); +} + +vuint32m2_t test_vrol_vv_u32m2(vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { + return __riscv_vrol_vv_u32m2(vs2, vs1, vl); +} + +vuint32m2_t test_vrol_vx_u32m2(vuint32m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u32m2(vs2, rs1, vl); +} + +vuint32m4_t test_vrol_vv_u32m4(vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { + return __riscv_vrol_vv_u32m4(vs2, vs1, vl); +} + +vuint32m4_t test_vrol_vx_u32m4(vuint32m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u32m4(vs2, rs1, vl); +} + +vuint32m8_t test_vrol_vv_u32m8(vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) { + return __riscv_vrol_vv_u32m8(vs2, vs1, vl); +} + +vuint32m8_t test_vrol_vx_u32m8(vuint32m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u32m8(vs2, rs1, vl); +} + +vuint64m1_t test_vrol_vv_u64m1(vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vrol_vv_u64m1(vs2, vs1, vl); +} + +vuint64m1_t test_vrol_vx_u64m1(vuint64m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u64m1(vs2, rs1, vl); +} + +vuint64m2_t test_vrol_vv_u64m2(vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { + return __riscv_vrol_vv_u64m2(vs2, vs1, vl); +} + +vuint64m2_t test_vrol_vx_u64m2(vuint64m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u64m2(vs2, rs1, vl); +} + +vuint64m4_t test_vrol_vv_u64m4(vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { + return __riscv_vrol_vv_u64m4(vs2, vs1, vl); +} + +vuint64m4_t test_vrol_vx_u64m4(vuint64m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u64m4(vs2, rs1, vl); +} + +vuint64m8_t test_vrol_vv_u64m8(vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { + return __riscv_vrol_vv_u64m8(vs2, vs1, vl); +} + +vuint64m8_t test_vrol_vx_u64m8(vuint64m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u64m8(vs2, rs1, vl); +} + +vuint8mf8_t test_vrol_vv_u8mf8_m(vbool64_t vm, vuint8mf8_t vs2, vuint8mf8_t vs1, 
size_t vl) { + return __riscv_vrol_vv_u8mf8_m(vm, vs2, vs1, vl); +} + +vuint8mf8_t test_vrol_vx_u8mf8_m(vbool64_t vm, vuint8mf8_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u8mf8_m(vm, vs2, rs1, vl); +} + +vuint8mf4_t test_vrol_vv_u8mf4_m(vbool32_t vm, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { + return __riscv_vrol_vv_u8mf4_m(vm, vs2, vs1, vl); +} + +vuint8mf4_t test_vrol_vx_u8mf4_m(vbool32_t vm, vuint8mf4_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u8mf4_m(vm, vs2, rs1, vl); +} + +vuint8mf2_t test_vrol_vv_u8mf2_m(vbool16_t vm, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { + return __riscv_vrol_vv_u8mf2_m(vm, vs2, vs1, vl); +} + +vuint8mf2_t test_vrol_vx_u8mf2_m(vbool16_t vm, vuint8mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u8mf2_m(vm, vs2, rs1, vl); +} + +vuint8m1_t test_vrol_vv_u8m1_m(vbool8_t vm, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vrol_vv_u8m1_m(vm, vs2, vs1, vl); +} + +vuint8m1_t test_vrol_vx_u8m1_m(vbool8_t vm, vuint8m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u8m1_m(vm, vs2, rs1, vl); +} + +vuint8m2_t test_vrol_vv_u8m2_m(vbool4_t vm, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) { + return __riscv_vrol_vv_u8m2_m(vm, vs2, vs1, vl); +} + +vuint8m2_t test_vrol_vx_u8m2_m(vbool4_t vm, vuint8m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u8m2_m(vm, vs2, rs1, vl); +} + +vuint8m4_t test_vrol_vv_u8m4_m(vbool2_t vm, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) { + return __riscv_vrol_vv_u8m4_m(vm, vs2, vs1, vl); +} + +vuint8m4_t test_vrol_vx_u8m4_m(vbool2_t vm, vuint8m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u8m4_m(vm, vs2, rs1, vl); +} + +vuint8m8_t test_vrol_vv_u8m8_m(vbool1_t vm, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) { + return __riscv_vrol_vv_u8m8_m(vm, vs2, vs1, vl); +} + +vuint8m8_t test_vrol_vx_u8m8_m(vbool1_t vm, vuint8m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u8m8_m(vm, vs2, rs1, vl); +} + +vuint16mf4_t test_vrol_vv_u16mf4_m(vbool64_t vm, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { + return __riscv_vrol_vv_u16mf4_m(vm, vs2, vs1, vl); +} + +vuint16mf4_t test_vrol_vx_u16mf4_m(vbool64_t vm, vuint16mf4_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u16mf4_m(vm, vs2, rs1, vl); +} + +vuint16mf2_t test_vrol_vv_u16mf2_m(vbool32_t vm, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { + return __riscv_vrol_vv_u16mf2_m(vm, vs2, vs1, vl); +} + +vuint16mf2_t test_vrol_vx_u16mf2_m(vbool32_t vm, vuint16mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u16mf2_m(vm, vs2, rs1, vl); +} + +vuint16m1_t test_vrol_vv_u16m1_m(vbool16_t vm, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vrol_vv_u16m1_m(vm, vs2, vs1, vl); +} + +vuint16m1_t test_vrol_vx_u16m1_m(vbool16_t vm, vuint16m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u16m1_m(vm, vs2, rs1, vl); +} + +vuint16m2_t test_vrol_vv_u16m2_m(vbool8_t vm, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { + return __riscv_vrol_vv_u16m2_m(vm, vs2, vs1, vl); +} + +vuint16m2_t test_vrol_vx_u16m2_m(vbool8_t vm, vuint16m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u16m2_m(vm, vs2, rs1, vl); +} + +vuint16m4_t test_vrol_vv_u16m4_m(vbool4_t vm, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { + return __riscv_vrol_vv_u16m4_m(vm, vs2, vs1, vl); +} + +vuint16m4_t test_vrol_vx_u16m4_m(vbool4_t vm, vuint16m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u16m4_m(vm, vs2, rs1, vl); +} + +vuint16m8_t test_vrol_vv_u16m8_m(vbool2_t vm, vuint16m8_t vs2, vuint16m8_t 
vs1, size_t vl) { + return __riscv_vrol_vv_u16m8_m(vm, vs2, vs1, vl); +} + +vuint16m8_t test_vrol_vx_u16m8_m(vbool2_t vm, vuint16m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u16m8_m(vm, vs2, rs1, vl); +} + +vuint32mf2_t test_vrol_vv_u32mf2_m(vbool64_t vm, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { + return __riscv_vrol_vv_u32mf2_m(vm, vs2, vs1, vl); +} + +vuint32mf2_t test_vrol_vx_u32mf2_m(vbool64_t vm, vuint32mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u32mf2_m(vm, vs2, rs1, vl); +} + +vuint32m1_t test_vrol_vv_u32m1_m(vbool32_t vm, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vrol_vv_u32m1_m(vm, vs2, vs1, vl); +} + +vuint32m1_t test_vrol_vx_u32m1_m(vbool32_t vm, vuint32m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u32m1_m(vm, vs2, rs1, vl); +} + +vuint32m2_t test_vrol_vv_u32m2_m(vbool16_t vm, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { + return __riscv_vrol_vv_u32m2_m(vm, vs2, vs1, vl); +} + +vuint32m2_t test_vrol_vx_u32m2_m(vbool16_t vm, vuint32m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u32m2_m(vm, vs2, rs1, vl); +} + +vuint32m4_t test_vrol_vv_u32m4_m(vbool8_t vm, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { + return __riscv_vrol_vv_u32m4_m(vm, vs2, vs1, vl); +} + +vuint32m4_t test_vrol_vx_u32m4_m(vbool8_t vm, vuint32m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u32m4_m(vm, vs2, rs1, vl); +} + +vuint32m8_t test_vrol_vv_u32m8_m(vbool4_t vm, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) { + return __riscv_vrol_vv_u32m8_m(vm, vs2, vs1, vl); +} + +vuint32m8_t test_vrol_vx_u32m8_m(vbool4_t vm, vuint32m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u32m8_m(vm, vs2, rs1, vl); +} + +vuint64m1_t test_vrol_vv_u64m1_m(vbool64_t vm, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vrol_vv_u64m1_m(vm, vs2, vs1, vl); +} + +vuint64m1_t test_vrol_vx_u64m1_m(vbool64_t vm, vuint64m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u64m1_m(vm, vs2, rs1, vl); +} + +vuint64m2_t test_vrol_vv_u64m2_m(vbool32_t vm, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { + return __riscv_vrol_vv_u64m2_m(vm, vs2, vs1, vl); +} + +vuint64m2_t test_vrol_vx_u64m2_m(vbool32_t vm, vuint64m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u64m2_m(vm, vs2, rs1, vl); +} + +vuint64m4_t test_vrol_vv_u64m4_m(vbool16_t vm, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { + return __riscv_vrol_vv_u64m4_m(vm, vs2, vs1, vl); +} + +vuint64m4_t test_vrol_vx_u64m4_m(vbool16_t vm, vuint64m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u64m4_m(vm, vs2, rs1, vl); +} + +vuint64m8_t test_vrol_vv_u64m8_m(vbool8_t vm, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { + return __riscv_vrol_vv_u64m8_m(vm, vs2, vs1, vl); +} + +vuint64m8_t test_vrol_vx_u64m8_m(vbool8_t vm, vuint64m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u64m8_m(vm, vs2, rs1, vl); +} diff --git a/auto-generated/vector-crypto/api-testing/vror.c b/auto-generated/vector-crypto/api-testing/vror.c new file mode 100644 index 000000000..9c8f32431 --- /dev/null +++ b/auto-generated/vector-crypto/api-testing/vror.c @@ -0,0 +1,354 @@ +#include +#include + +vuint8mf8_t test_vror_vv_u8mf8(vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { + return __riscv_vror_vv_u8mf8(vs2, vs1, vl); +} + +vuint8mf8_t test_vror_vx_u8mf8(vuint8mf8_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u8mf8(vs2, rs1, vl); +} + +vuint8mf4_t test_vror_vv_u8mf4(vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { + return __riscv_vror_vv_u8mf4(vs2, vs1, 
vl); +} + +vuint8mf4_t test_vror_vx_u8mf4(vuint8mf4_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u8mf4(vs2, rs1, vl); +} + +vuint8mf2_t test_vror_vv_u8mf2(vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { + return __riscv_vror_vv_u8mf2(vs2, vs1, vl); +} + +vuint8mf2_t test_vror_vx_u8mf2(vuint8mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u8mf2(vs2, rs1, vl); +} + +vuint8m1_t test_vror_vv_u8m1(vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vror_vv_u8m1(vs2, vs1, vl); +} + +vuint8m1_t test_vror_vx_u8m1(vuint8m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u8m1(vs2, rs1, vl); +} + +vuint8m2_t test_vror_vv_u8m2(vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) { + return __riscv_vror_vv_u8m2(vs2, vs1, vl); +} + +vuint8m2_t test_vror_vx_u8m2(vuint8m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u8m2(vs2, rs1, vl); +} + +vuint8m4_t test_vror_vv_u8m4(vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) { + return __riscv_vror_vv_u8m4(vs2, vs1, vl); +} + +vuint8m4_t test_vror_vx_u8m4(vuint8m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u8m4(vs2, rs1, vl); +} + +vuint8m8_t test_vror_vv_u8m8(vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) { + return __riscv_vror_vv_u8m8(vs2, vs1, vl); +} + +vuint8m8_t test_vror_vx_u8m8(vuint8m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u8m8(vs2, rs1, vl); +} + +vuint16mf4_t test_vror_vv_u16mf4(vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { + return __riscv_vror_vv_u16mf4(vs2, vs1, vl); +} + +vuint16mf4_t test_vror_vx_u16mf4(vuint16mf4_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u16mf4(vs2, rs1, vl); +} + +vuint16mf2_t test_vror_vv_u16mf2(vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { + return __riscv_vror_vv_u16mf2(vs2, vs1, vl); +} + +vuint16mf2_t test_vror_vx_u16mf2(vuint16mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u16mf2(vs2, rs1, vl); +} + +vuint16m1_t test_vror_vv_u16m1(vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vror_vv_u16m1(vs2, vs1, vl); +} + +vuint16m1_t test_vror_vx_u16m1(vuint16m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u16m1(vs2, rs1, vl); +} + +vuint16m2_t test_vror_vv_u16m2(vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { + return __riscv_vror_vv_u16m2(vs2, vs1, vl); +} + +vuint16m2_t test_vror_vx_u16m2(vuint16m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u16m2(vs2, rs1, vl); +} + +vuint16m4_t test_vror_vv_u16m4(vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { + return __riscv_vror_vv_u16m4(vs2, vs1, vl); +} + +vuint16m4_t test_vror_vx_u16m4(vuint16m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u16m4(vs2, rs1, vl); +} + +vuint16m8_t test_vror_vv_u16m8(vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) { + return __riscv_vror_vv_u16m8(vs2, vs1, vl); +} + +vuint16m8_t test_vror_vx_u16m8(vuint16m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u16m8(vs2, rs1, vl); +} + +vuint32mf2_t test_vror_vv_u32mf2(vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { + return __riscv_vror_vv_u32mf2(vs2, vs1, vl); +} + +vuint32mf2_t test_vror_vx_u32mf2(vuint32mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u32mf2(vs2, rs1, vl); +} + +vuint32m1_t test_vror_vv_u32m1(vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vror_vv_u32m1(vs2, vs1, vl); +} + +vuint32m1_t test_vror_vx_u32m1(vuint32m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u32m1(vs2, rs1, vl); +} + +vuint32m2_t test_vror_vv_u32m2(vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { + return 
__riscv_vror_vv_u32m2(vs2, vs1, vl); +} + +vuint32m2_t test_vror_vx_u32m2(vuint32m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u32m2(vs2, rs1, vl); +} + +vuint32m4_t test_vror_vv_u32m4(vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { + return __riscv_vror_vv_u32m4(vs2, vs1, vl); +} + +vuint32m4_t test_vror_vx_u32m4(vuint32m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u32m4(vs2, rs1, vl); +} + +vuint32m8_t test_vror_vv_u32m8(vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) { + return __riscv_vror_vv_u32m8(vs2, vs1, vl); +} + +vuint32m8_t test_vror_vx_u32m8(vuint32m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u32m8(vs2, rs1, vl); +} + +vuint64m1_t test_vror_vv_u64m1(vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vror_vv_u64m1(vs2, vs1, vl); +} + +vuint64m1_t test_vror_vx_u64m1(vuint64m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u64m1(vs2, rs1, vl); +} + +vuint64m2_t test_vror_vv_u64m2(vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { + return __riscv_vror_vv_u64m2(vs2, vs1, vl); +} + +vuint64m2_t test_vror_vx_u64m2(vuint64m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u64m2(vs2, rs1, vl); +} + +vuint64m4_t test_vror_vv_u64m4(vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { + return __riscv_vror_vv_u64m4(vs2, vs1, vl); +} + +vuint64m4_t test_vror_vx_u64m4(vuint64m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u64m4(vs2, rs1, vl); +} + +vuint64m8_t test_vror_vv_u64m8(vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { + return __riscv_vror_vv_u64m8(vs2, vs1, vl); +} + +vuint64m8_t test_vror_vx_u64m8(vuint64m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u64m8(vs2, rs1, vl); +} + +vuint8mf8_t test_vror_vv_u8mf8_m(vbool64_t vm, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { + return __riscv_vror_vv_u8mf8_m(vm, vs2, vs1, vl); +} + +vuint8mf8_t test_vror_vx_u8mf8_m(vbool64_t vm, vuint8mf8_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u8mf8_m(vm, vs2, rs1, vl); +} + +vuint8mf4_t test_vror_vv_u8mf4_m(vbool32_t vm, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { + return __riscv_vror_vv_u8mf4_m(vm, vs2, vs1, vl); +} + +vuint8mf4_t test_vror_vx_u8mf4_m(vbool32_t vm, vuint8mf4_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u8mf4_m(vm, vs2, rs1, vl); +} + +vuint8mf2_t test_vror_vv_u8mf2_m(vbool16_t vm, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { + return __riscv_vror_vv_u8mf2_m(vm, vs2, vs1, vl); +} + +vuint8mf2_t test_vror_vx_u8mf2_m(vbool16_t vm, vuint8mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u8mf2_m(vm, vs2, rs1, vl); +} + +vuint8m1_t test_vror_vv_u8m1_m(vbool8_t vm, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vror_vv_u8m1_m(vm, vs2, vs1, vl); +} + +vuint8m1_t test_vror_vx_u8m1_m(vbool8_t vm, vuint8m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u8m1_m(vm, vs2, rs1, vl); +} + +vuint8m2_t test_vror_vv_u8m2_m(vbool4_t vm, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) { + return __riscv_vror_vv_u8m2_m(vm, vs2, vs1, vl); +} + +vuint8m2_t test_vror_vx_u8m2_m(vbool4_t vm, vuint8m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u8m2_m(vm, vs2, rs1, vl); +} + +vuint8m4_t test_vror_vv_u8m4_m(vbool2_t vm, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) { + return __riscv_vror_vv_u8m4_m(vm, vs2, vs1, vl); +} + +vuint8m4_t test_vror_vx_u8m4_m(vbool2_t vm, vuint8m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u8m4_m(vm, vs2, rs1, vl); +} + +vuint8m8_t test_vror_vv_u8m8_m(vbool1_t vm, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) { + 
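// vror.vv (masked): rotates each element of vs2 right by the amount held in the corresponding element of vs1.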
return __riscv_vror_vv_u8m8_m(vm, vs2, vs1, vl); +} + +vuint8m8_t test_vror_vx_u8m8_m(vbool1_t vm, vuint8m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u8m8_m(vm, vs2, rs1, vl); +} + +vuint16mf4_t test_vror_vv_u16mf4_m(vbool64_t vm, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { + return __riscv_vror_vv_u16mf4_m(vm, vs2, vs1, vl); +} + +vuint16mf4_t test_vror_vx_u16mf4_m(vbool64_t vm, vuint16mf4_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u16mf4_m(vm, vs2, rs1, vl); +} + +vuint16mf2_t test_vror_vv_u16mf2_m(vbool32_t vm, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { + return __riscv_vror_vv_u16mf2_m(vm, vs2, vs1, vl); +} + +vuint16mf2_t test_vror_vx_u16mf2_m(vbool32_t vm, vuint16mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u16mf2_m(vm, vs2, rs1, vl); +} + +vuint16m1_t test_vror_vv_u16m1_m(vbool16_t vm, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vror_vv_u16m1_m(vm, vs2, vs1, vl); +} + +vuint16m1_t test_vror_vx_u16m1_m(vbool16_t vm, vuint16m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u16m1_m(vm, vs2, rs1, vl); +} + +vuint16m2_t test_vror_vv_u16m2_m(vbool8_t vm, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { + return __riscv_vror_vv_u16m2_m(vm, vs2, vs1, vl); +} + +vuint16m2_t test_vror_vx_u16m2_m(vbool8_t vm, vuint16m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u16m2_m(vm, vs2, rs1, vl); +} + +vuint16m4_t test_vror_vv_u16m4_m(vbool4_t vm, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { + return __riscv_vror_vv_u16m4_m(vm, vs2, vs1, vl); +} + +vuint16m4_t test_vror_vx_u16m4_m(vbool4_t vm, vuint16m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u16m4_m(vm, vs2, rs1, vl); +} + +vuint16m8_t test_vror_vv_u16m8_m(vbool2_t vm, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) { + return __riscv_vror_vv_u16m8_m(vm, vs2, vs1, vl); +} + +vuint16m8_t test_vror_vx_u16m8_m(vbool2_t vm, vuint16m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u16m8_m(vm, vs2, rs1, vl); +} + +vuint32mf2_t test_vror_vv_u32mf2_m(vbool64_t vm, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { + return __riscv_vror_vv_u32mf2_m(vm, vs2, vs1, vl); +} + +vuint32mf2_t test_vror_vx_u32mf2_m(vbool64_t vm, vuint32mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u32mf2_m(vm, vs2, rs1, vl); +} + +vuint32m1_t test_vror_vv_u32m1_m(vbool32_t vm, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vror_vv_u32m1_m(vm, vs2, vs1, vl); +} + +vuint32m1_t test_vror_vx_u32m1_m(vbool32_t vm, vuint32m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u32m1_m(vm, vs2, rs1, vl); +} + +vuint32m2_t test_vror_vv_u32m2_m(vbool16_t vm, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { + return __riscv_vror_vv_u32m2_m(vm, vs2, vs1, vl); +} + +vuint32m2_t test_vror_vx_u32m2_m(vbool16_t vm, vuint32m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u32m2_m(vm, vs2, rs1, vl); +} + +vuint32m4_t test_vror_vv_u32m4_m(vbool8_t vm, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { + return __riscv_vror_vv_u32m4_m(vm, vs2, vs1, vl); +} + +vuint32m4_t test_vror_vx_u32m4_m(vbool8_t vm, vuint32m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u32m4_m(vm, vs2, rs1, vl); +} + +vuint32m8_t test_vror_vv_u32m8_m(vbool4_t vm, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) { + return __riscv_vror_vv_u32m8_m(vm, vs2, vs1, vl); +} + +vuint32m8_t test_vror_vx_u32m8_m(vbool4_t vm, vuint32m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u32m8_m(vm, vs2, rs1, vl); +} + +vuint64m1_t test_vror_vv_u64m1_m(vbool64_t vm, 
vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vror_vv_u64m1_m(vm, vs2, vs1, vl); +} + +vuint64m1_t test_vror_vx_u64m1_m(vbool64_t vm, vuint64m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u64m1_m(vm, vs2, rs1, vl); +} + +vuint64m2_t test_vror_vv_u64m2_m(vbool32_t vm, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { + return __riscv_vror_vv_u64m2_m(vm, vs2, vs1, vl); +} + +vuint64m2_t test_vror_vx_u64m2_m(vbool32_t vm, vuint64m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u64m2_m(vm, vs2, rs1, vl); +} + +vuint64m4_t test_vror_vv_u64m4_m(vbool16_t vm, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { + return __riscv_vror_vv_u64m4_m(vm, vs2, vs1, vl); +} + +vuint64m4_t test_vror_vx_u64m4_m(vbool16_t vm, vuint64m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u64m4_m(vm, vs2, rs1, vl); +} + +vuint64m8_t test_vror_vv_u64m8_m(vbool8_t vm, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { + return __riscv_vror_vv_u64m8_m(vm, vs2, vs1, vl); +} + +vuint64m8_t test_vror_vx_u64m8_m(vbool8_t vm, vuint64m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u64m8_m(vm, vs2, rs1, vl); +} diff --git a/auto-generated/vector-crypto/api-testing/vsha2ch.c b/auto-generated/vector-crypto/api-testing/vsha2ch.c new file mode 100644 index 000000000..89c32480f --- /dev/null +++ b/auto-generated/vector-crypto/api-testing/vsha2ch.c @@ -0,0 +1,38 @@ +#include +#include + +vuint32mf2_t test_vsha2ch_vv_u32mf2(vuint32mf2_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { + return __riscv_vsha2ch_vv_u32mf2(vd, vs2, vs1, vl); +} + +vuint32m1_t test_vsha2ch_vv_u32m1(vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vsha2ch_vv_u32m1(vd, vs2, vs1, vl); +} + +vuint32m2_t test_vsha2ch_vv_u32m2(vuint32m2_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { + return __riscv_vsha2ch_vv_u32m2(vd, vs2, vs1, vl); +} + +vuint32m4_t test_vsha2ch_vv_u32m4(vuint32m4_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { + return __riscv_vsha2ch_vv_u32m4(vd, vs2, vs1, vl); +} + +vuint32m8_t test_vsha2ch_vv_u32m8(vuint32m8_t vd, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) { + return __riscv_vsha2ch_vv_u32m8(vd, vs2, vs1, vl); +} + +vuint64m1_t test_vsha2ch_vv_u64m1(vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vsha2ch_vv_u64m1(vd, vs2, vs1, vl); +} + +vuint64m2_t test_vsha2ch_vv_u64m2(vuint64m2_t vd, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { + return __riscv_vsha2ch_vv_u64m2(vd, vs2, vs1, vl); +} + +vuint64m4_t test_vsha2ch_vv_u64m4(vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { + return __riscv_vsha2ch_vv_u64m4(vd, vs2, vs1, vl); +} + +vuint64m8_t test_vsha2ch_vv_u64m8(vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { + return __riscv_vsha2ch_vv_u64m8(vd, vs2, vs1, vl); +} diff --git a/auto-generated/vector-crypto/api-testing/vsha2cl.c b/auto-generated/vector-crypto/api-testing/vsha2cl.c new file mode 100644 index 000000000..f213d6477 --- /dev/null +++ b/auto-generated/vector-crypto/api-testing/vsha2cl.c @@ -0,0 +1,38 @@ +#include +#include + +vuint32mf2_t test_vsha2cl_vv_u32mf2(vuint32mf2_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { + return __riscv_vsha2cl_vv_u32mf2(vd, vs2, vs1, vl); +} + +vuint32m1_t test_vsha2cl_vv_u32m1(vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vsha2cl_vv_u32m1(vd, vs2, vs1, vl); +} + +vuint32m2_t test_vsha2cl_vv_u32m2(vuint32m2_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { + return 
__riscv_vsha2cl_vv_u32m2(vd, vs2, vs1, vl); +} + +vuint32m4_t test_vsha2cl_vv_u32m4(vuint32m4_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { + return __riscv_vsha2cl_vv_u32m4(vd, vs2, vs1, vl); +} + +vuint32m8_t test_vsha2cl_vv_u32m8(vuint32m8_t vd, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) { + return __riscv_vsha2cl_vv_u32m8(vd, vs2, vs1, vl); +} + +vuint64m1_t test_vsha2cl_vv_u64m1(vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vsha2cl_vv_u64m1(vd, vs2, vs1, vl); +} + +vuint64m2_t test_vsha2cl_vv_u64m2(vuint64m2_t vd, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { + return __riscv_vsha2cl_vv_u64m2(vd, vs2, vs1, vl); +} + +vuint64m4_t test_vsha2cl_vv_u64m4(vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { + return __riscv_vsha2cl_vv_u64m4(vd, vs2, vs1, vl); +} + +vuint64m8_t test_vsha2cl_vv_u64m8(vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { + return __riscv_vsha2cl_vv_u64m8(vd, vs2, vs1, vl); +} diff --git a/auto-generated/vector-crypto/api-testing/vsha2ms.c b/auto-generated/vector-crypto/api-testing/vsha2ms.c new file mode 100644 index 000000000..77ef0289a --- /dev/null +++ b/auto-generated/vector-crypto/api-testing/vsha2ms.c @@ -0,0 +1,38 @@ +#include +#include + +vuint32mf2_t test_vsha2ms_vv_u32mf2(vuint32mf2_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { + return __riscv_vsha2ms_vv_u32mf2(vd, vs2, vs1, vl); +} + +vuint32m1_t test_vsha2ms_vv_u32m1(vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vsha2ms_vv_u32m1(vd, vs2, vs1, vl); +} + +vuint32m2_t test_vsha2ms_vv_u32m2(vuint32m2_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { + return __riscv_vsha2ms_vv_u32m2(vd, vs2, vs1, vl); +} + +vuint32m4_t test_vsha2ms_vv_u32m4(vuint32m4_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { + return __riscv_vsha2ms_vv_u32m4(vd, vs2, vs1, vl); +} + +vuint32m8_t test_vsha2ms_vv_u32m8(vuint32m8_t vd, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) { + return __riscv_vsha2ms_vv_u32m8(vd, vs2, vs1, vl); +} + +vuint64m1_t test_vsha2ms_vv_u64m1(vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vsha2ms_vv_u64m1(vd, vs2, vs1, vl); +} + +vuint64m2_t test_vsha2ms_vv_u64m2(vuint64m2_t vd, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { + return __riscv_vsha2ms_vv_u64m2(vd, vs2, vs1, vl); +} + +vuint64m4_t test_vsha2ms_vv_u64m4(vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { + return __riscv_vsha2ms_vv_u64m4(vd, vs2, vs1, vl); +} + +vuint64m8_t test_vsha2ms_vv_u64m8(vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { + return __riscv_vsha2ms_vv_u64m8(vd, vs2, vs1, vl); +} diff --git a/auto-generated/vector-crypto/api-testing/vsm3c.c b/auto-generated/vector-crypto/api-testing/vsm3c.c new file mode 100644 index 000000000..67d0f776f --- /dev/null +++ b/auto-generated/vector-crypto/api-testing/vsm3c.c @@ -0,0 +1,22 @@ +#include +#include + +vuint32mf2_t test_vsm3c_vi_u32mf2(vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) { + return __riscv_vsm3c_vi_u32mf2(vd, vs2, 0, vl); +} + +vuint32m1_t test_vsm3c_vi_u32m1(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vsm3c_vi_u32m1(vd, vs2, 0, vl); +} + +vuint32m2_t test_vsm3c_vi_u32m2(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vsm3c_vi_u32m2(vd, vs2, 0, vl); +} + +vuint32m4_t test_vsm3c_vi_u32m4(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vsm3c_vi_u32m4(vd, vs2, 0, vl); +} + +vuint32m8_t test_vsm3c_vi_u32m8(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { + 
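// vsm3c.vi: SM3 message-compression rounds; the round-group immediate is fixed to 0 in these auto-generated tests.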
return __riscv_vsm3c_vi_u32m8(vd, vs2, 0, vl); +} diff --git a/auto-generated/vector-crypto/api-testing/vsm3me.c b/auto-generated/vector-crypto/api-testing/vsm3me.c new file mode 100644 index 000000000..5307ba8bb --- /dev/null +++ b/auto-generated/vector-crypto/api-testing/vsm3me.c @@ -0,0 +1,22 @@ +#include +#include + +vuint32mf2_t test_vsm3me_vv_u32mf2(vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { + return __riscv_vsm3me_vv_u32mf2(vs2, vs1, vl); +} + +vuint32m1_t test_vsm3me_vv_u32m1(vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vsm3me_vv_u32m1(vs2, vs1, vl); +} + +vuint32m2_t test_vsm3me_vv_u32m2(vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { + return __riscv_vsm3me_vv_u32m2(vs2, vs1, vl); +} + +vuint32m4_t test_vsm3me_vv_u32m4(vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { + return __riscv_vsm3me_vv_u32m4(vs2, vs1, vl); +} + +vuint32m8_t test_vsm3me_vv_u32m8(vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) { + return __riscv_vsm3me_vv_u32m8(vs2, vs1, vl); +} diff --git a/auto-generated/vector-crypto/api-testing/vsm4k.c b/auto-generated/vector-crypto/api-testing/vsm4k.c new file mode 100644 index 000000000..a33e29d8a --- /dev/null +++ b/auto-generated/vector-crypto/api-testing/vsm4k.c @@ -0,0 +1,22 @@ +#include +#include + +vuint32mf2_t test_vsm4k_vi_u32mf2(vuint32mf2_t vs2, size_t vl) { + return __riscv_vsm4k_vi_u32mf2(vs2, 0, vl); +} + +vuint32m1_t test_vsm4k_vi_u32m1(vuint32m1_t vs2, size_t vl) { + return __riscv_vsm4k_vi_u32m1(vs2, 0, vl); +} + +vuint32m2_t test_vsm4k_vi_u32m2(vuint32m2_t vs2, size_t vl) { + return __riscv_vsm4k_vi_u32m2(vs2, 0, vl); +} + +vuint32m4_t test_vsm4k_vi_u32m4(vuint32m4_t vs2, size_t vl) { + return __riscv_vsm4k_vi_u32m4(vs2, 0, vl); +} + +vuint32m8_t test_vsm4k_vi_u32m8(vuint32m8_t vs2, size_t vl) { + return __riscv_vsm4k_vi_u32m8(vs2, 0, vl); +} diff --git a/auto-generated/vector-crypto/api-testing/vsm4r.c b/auto-generated/vector-crypto/api-testing/vsm4r.c new file mode 100644 index 000000000..b0c2fdfe1 --- /dev/null +++ b/auto-generated/vector-crypto/api-testing/vsm4r.c @@ -0,0 +1,78 @@ +#include +#include + +vuint32mf2_t test_vsm4r_vv_u32mf2(vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) { + return __riscv_vsm4r_vv_u32mf2(vd, vs2, vl); +} + +vuint32mf2_t test_vsm4r_vs_u32mf2_u32mf2(vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) { + return __riscv_vsm4r_vs_u32mf2_u32mf2(vd, vs2, vl); +} + +vuint32m1_t test_vsm4r_vs_u32mf2_u32m1(vuint32m1_t vd, vuint32mf2_t vs2, size_t vl) { + return __riscv_vsm4r_vs_u32mf2_u32m1(vd, vs2, vl); +} + +vuint32m2_t test_vsm4r_vs_u32mf2_u32m2(vuint32m2_t vd, vuint32mf2_t vs2, size_t vl) { + return __riscv_vsm4r_vs_u32mf2_u32m2(vd, vs2, vl); +} + +vuint32m4_t test_vsm4r_vs_u32mf2_u32m4(vuint32m4_t vd, vuint32mf2_t vs2, size_t vl) { + return __riscv_vsm4r_vs_u32mf2_u32m4(vd, vs2, vl); +} + +vuint32m8_t test_vsm4r_vs_u32mf2_u32m8(vuint32m8_t vd, vuint32mf2_t vs2, size_t vl) { + return __riscv_vsm4r_vs_u32mf2_u32m8(vd, vs2, vl); +} + +vuint32m1_t test_vsm4r_vv_u32m1(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vsm4r_vv_u32m1(vd, vs2, vl); +} + +vuint32m1_t test_vsm4r_vs_u32m1_u32m1(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vsm4r_vs_u32m1_u32m1(vd, vs2, vl); +} + +vuint32m2_t test_vsm4r_vs_u32m1_u32m2(vuint32m2_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vsm4r_vs_u32m1_u32m2(vd, vs2, vl); +} + +vuint32m4_t test_vsm4r_vs_u32m1_u32m4(vuint32m4_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vsm4r_vs_u32m1_u32m4(vd, vs2, vl); +} + +vuint32m8_t 
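// vsm4r.vs: SM4 rounds using the round-key element group from element group 0 of vs2, applied to every element group of vd.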
test_vsm4r_vs_u32m1_u32m8(vuint32m8_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vsm4r_vs_u32m1_u32m8(vd, vs2, vl); +} + +vuint32m2_t test_vsm4r_vv_u32m2(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vsm4r_vv_u32m2(vd, vs2, vl); +} + +vuint32m2_t test_vsm4r_vs_u32m2_u32m2(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vsm4r_vs_u32m2_u32m2(vd, vs2, vl); +} + +vuint32m4_t test_vsm4r_vs_u32m2_u32m4(vuint32m4_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vsm4r_vs_u32m2_u32m4(vd, vs2, vl); +} + +vuint32m8_t test_vsm4r_vs_u32m2_u32m8(vuint32m8_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vsm4r_vs_u32m2_u32m8(vd, vs2, vl); +} + +vuint32m4_t test_vsm4r_vv_u32m4(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vsm4r_vv_u32m4(vd, vs2, vl); +} + +vuint32m4_t test_vsm4r_vs_u32m4_u32m4(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vsm4r_vs_u32m4_u32m4(vd, vs2, vl); +} + +vuint32m8_t test_vsm4r_vs_u32m4_u32m8(vuint32m8_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vsm4r_vs_u32m4_u32m8(vd, vs2, vl); +} + +vuint32m8_t test_vsm4r_vv_u32m8(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { + return __riscv_vsm4r_vv_u32m8(vd, vs2, vl); +} diff --git a/auto-generated/vector-crypto/api-testing/vwsll.c b/auto-generated/vector-crypto/api-testing/vwsll.c new file mode 100644 index 000000000..5e6a1a884 --- /dev/null +++ b/auto-generated/vector-crypto/api-testing/vwsll.c @@ -0,0 +1,242 @@ +#include +#include + +vuint16mf4_t test_vwsll_vv_u16mf4(vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { + return __riscv_vwsll_vv_u16mf4(vs2, vs1, vl); +} + +vuint16mf4_t test_vwsll_vx_u16mf4(vuint8mf8_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_vx_u16mf4(vs2, rs1, vl); +} + +vuint16mf2_t test_vwsll_vv_u16mf2(vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { + return __riscv_vwsll_vv_u16mf2(vs2, vs1, vl); +} + +vuint16mf2_t test_vwsll_vx_u16mf2(vuint8mf4_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_vx_u16mf2(vs2, rs1, vl); +} + +vuint16m1_t test_vwsll_vv_u16m1(vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { + return __riscv_vwsll_vv_u16m1(vs2, vs1, vl); +} + +vuint16m1_t test_vwsll_vx_u16m1(vuint8mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_vx_u16m1(vs2, rs1, vl); +} + +vuint16m2_t test_vwsll_vv_u16m2(vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vwsll_vv_u16m2(vs2, vs1, vl); +} + +vuint16m2_t test_vwsll_vx_u16m2(vuint8m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_vx_u16m2(vs2, rs1, vl); +} + +vuint16m4_t test_vwsll_vv_u16m4(vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) { + return __riscv_vwsll_vv_u16m4(vs2, vs1, vl); +} + +vuint16m4_t test_vwsll_vx_u16m4(vuint8m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_vx_u16m4(vs2, rs1, vl); +} + +vuint16m8_t test_vwsll_vv_u16m8(vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) { + return __riscv_vwsll_vv_u16m8(vs2, vs1, vl); +} + +vuint16m8_t test_vwsll_vx_u16m8(vuint8m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_vx_u16m8(vs2, rs1, vl); +} + +vuint32mf2_t test_vwsll_vv_u32mf2(vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { + return __riscv_vwsll_vv_u32mf2(vs2, vs1, vl); +} + +vuint32mf2_t test_vwsll_vx_u32mf2(vuint16mf4_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_vx_u32mf2(vs2, rs1, vl); +} + +vuint32m1_t test_vwsll_vv_u32m1(vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { + return __riscv_vwsll_vv_u32m1(vs2, vs1, vl); +} + +vuint32m1_t test_vwsll_vx_u32m1(vuint16mf2_t vs2, size_t rs1, size_t vl) { + return 
__riscv_vwsll_vx_u32m1(vs2, rs1, vl); +} + +vuint32m2_t test_vwsll_vv_u32m2(vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vwsll_vv_u32m2(vs2, vs1, vl); +} + +vuint32m2_t test_vwsll_vx_u32m2(vuint16m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_vx_u32m2(vs2, rs1, vl); +} + +vuint32m4_t test_vwsll_vv_u32m4(vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { + return __riscv_vwsll_vv_u32m4(vs2, vs1, vl); +} + +vuint32m4_t test_vwsll_vx_u32m4(vuint16m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_vx_u32m4(vs2, rs1, vl); +} + +vuint32m8_t test_vwsll_vv_u32m8(vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { + return __riscv_vwsll_vv_u32m8(vs2, vs1, vl); +} + +vuint32m8_t test_vwsll_vx_u32m8(vuint16m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_vx_u32m8(vs2, rs1, vl); +} + +vuint64m1_t test_vwsll_vv_u64m1(vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { + return __riscv_vwsll_vv_u64m1(vs2, vs1, vl); +} + +vuint64m1_t test_vwsll_vx_u64m1(vuint32mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_vx_u64m1(vs2, rs1, vl); +} + +vuint64m2_t test_vwsll_vv_u64m2(vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vwsll_vv_u64m2(vs2, vs1, vl); +} + +vuint64m2_t test_vwsll_vx_u64m2(vuint32m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_vx_u64m2(vs2, rs1, vl); +} + +vuint64m4_t test_vwsll_vv_u64m4(vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { + return __riscv_vwsll_vv_u64m4(vs2, vs1, vl); +} + +vuint64m4_t test_vwsll_vx_u64m4(vuint32m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_vx_u64m4(vs2, rs1, vl); +} + +vuint64m8_t test_vwsll_vv_u64m8(vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { + return __riscv_vwsll_vv_u64m8(vs2, vs1, vl); +} + +vuint64m8_t test_vwsll_vx_u64m8(vuint32m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_vx_u64m8(vs2, rs1, vl); +} + +vuint16mf4_t test_vwsll_vv_u16mf4_m(vbool64_t vm, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { + return __riscv_vwsll_vv_u16mf4_m(vm, vs2, vs1, vl); +} + +vuint16mf4_t test_vwsll_vx_u16mf4_m(vbool64_t vm, vuint8mf8_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_vx_u16mf4_m(vm, vs2, rs1, vl); +} + +vuint16mf2_t test_vwsll_vv_u16mf2_m(vbool32_t vm, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { + return __riscv_vwsll_vv_u16mf2_m(vm, vs2, vs1, vl); +} + +vuint16mf2_t test_vwsll_vx_u16mf2_m(vbool32_t vm, vuint8mf4_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_vx_u16mf2_m(vm, vs2, rs1, vl); +} + +vuint16m1_t test_vwsll_vv_u16m1_m(vbool16_t vm, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { + return __riscv_vwsll_vv_u16m1_m(vm, vs2, vs1, vl); +} + +vuint16m1_t test_vwsll_vx_u16m1_m(vbool16_t vm, vuint8mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_vx_u16m1_m(vm, vs2, rs1, vl); +} + +vuint16m2_t test_vwsll_vv_u16m2_m(vbool8_t vm, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vwsll_vv_u16m2_m(vm, vs2, vs1, vl); +} + +vuint16m2_t test_vwsll_vx_u16m2_m(vbool8_t vm, vuint8m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_vx_u16m2_m(vm, vs2, rs1, vl); +} + +vuint16m4_t test_vwsll_vv_u16m4_m(vbool4_t vm, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) { + return __riscv_vwsll_vv_u16m4_m(vm, vs2, vs1, vl); +} + +vuint16m4_t test_vwsll_vx_u16m4_m(vbool4_t vm, vuint8m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_vx_u16m4_m(vm, vs2, rs1, vl); +} + +vuint16m8_t test_vwsll_vv_u16m8_m(vbool2_t vm, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) { + return __riscv_vwsll_vv_u16m8_m(vm, vs2, vs1, vl); +} + +vuint16m8_t 
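// vwsll.vx (masked): widening shift-left logical; each narrow element of vs2 is zero-extended to twice SEW and shifted left by rs1.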
test_vwsll_vx_u16m8_m(vbool2_t vm, vuint8m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_vx_u16m8_m(vm, vs2, rs1, vl); +} + +vuint32mf2_t test_vwsll_vv_u32mf2_m(vbool64_t vm, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { + return __riscv_vwsll_vv_u32mf2_m(vm, vs2, vs1, vl); +} + +vuint32mf2_t test_vwsll_vx_u32mf2_m(vbool64_t vm, vuint16mf4_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_vx_u32mf2_m(vm, vs2, rs1, vl); +} + +vuint32m1_t test_vwsll_vv_u32m1_m(vbool32_t vm, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { + return __riscv_vwsll_vv_u32m1_m(vm, vs2, vs1, vl); +} + +vuint32m1_t test_vwsll_vx_u32m1_m(vbool32_t vm, vuint16mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_vx_u32m1_m(vm, vs2, rs1, vl); +} + +vuint32m2_t test_vwsll_vv_u32m2_m(vbool16_t vm, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vwsll_vv_u32m2_m(vm, vs2, vs1, vl); +} + +vuint32m2_t test_vwsll_vx_u32m2_m(vbool16_t vm, vuint16m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_vx_u32m2_m(vm, vs2, rs1, vl); +} + +vuint32m4_t test_vwsll_vv_u32m4_m(vbool8_t vm, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { + return __riscv_vwsll_vv_u32m4_m(vm, vs2, vs1, vl); +} + +vuint32m4_t test_vwsll_vx_u32m4_m(vbool8_t vm, vuint16m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_vx_u32m4_m(vm, vs2, rs1, vl); +} + +vuint32m8_t test_vwsll_vv_u32m8_m(vbool4_t vm, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { + return __riscv_vwsll_vv_u32m8_m(vm, vs2, vs1, vl); +} + +vuint32m8_t test_vwsll_vx_u32m8_m(vbool4_t vm, vuint16m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_vx_u32m8_m(vm, vs2, rs1, vl); +} + +vuint64m1_t test_vwsll_vv_u64m1_m(vbool64_t vm, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { + return __riscv_vwsll_vv_u64m1_m(vm, vs2, vs1, vl); +} + +vuint64m1_t test_vwsll_vx_u64m1_m(vbool64_t vm, vuint32mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_vx_u64m1_m(vm, vs2, rs1, vl); +} + +vuint64m2_t test_vwsll_vv_u64m2_m(vbool32_t vm, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vwsll_vv_u64m2_m(vm, vs2, vs1, vl); +} + +vuint64m2_t test_vwsll_vx_u64m2_m(vbool32_t vm, vuint32m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_vx_u64m2_m(vm, vs2, rs1, vl); +} + +vuint64m4_t test_vwsll_vv_u64m4_m(vbool16_t vm, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { + return __riscv_vwsll_vv_u64m4_m(vm, vs2, vs1, vl); +} + +vuint64m4_t test_vwsll_vx_u64m4_m(vbool16_t vm, vuint32m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_vx_u64m4_m(vm, vs2, rs1, vl); +} + +vuint64m8_t test_vwsll_vv_u64m8_m(vbool8_t vm, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { + return __riscv_vwsll_vv_u64m8_m(vm, vs2, vs1, vl); +} + +vuint64m8_t test_vwsll_vx_u64m8_m(vbool8_t vm, vuint32m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_vx_u64m8_m(vm, vs2, rs1, vl); +} diff --git a/auto-generated/vector-crypto/intrinsic_funcs.md b/auto-generated/vector-crypto/intrinsic_funcs.md new file mode 100644 index 000000000..5e8c4df54 --- /dev/null +++ b/auto-generated/vector-crypto/intrinsic_funcs.md @@ -0,0 +1,938 @@ + +=== Zvbb - Vector Bit-manipulation used in Cryptography + +[[]] +==== Vector Bit-manipulation used in Cryptography - Bitwise And-Not + +[,c] +---- +vuint8mf8_t __riscv_vandn_vv_u8mf8 (vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl); +vuint8mf8_t __riscv_vandn_vx_u8mf8 (vuint8mf8_t vs2, uint8_t rs1, size_t vl); +vuint8mf4_t __riscv_vandn_vv_u8mf4 (vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl); +vuint8mf4_t __riscv_vandn_vx_u8mf4 
(vuint8mf4_t vs2, uint8_t rs1, size_t vl); +vuint8mf2_t __riscv_vandn_vv_u8mf2 (vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl); +vuint8mf2_t __riscv_vandn_vx_u8mf2 (vuint8mf2_t vs2, uint8_t rs1, size_t vl); +vuint8m1_t __riscv_vandn_vv_u8m1 (vuint8m1_t vs2, vuint8m1_t vs1, size_t vl); +vuint8m1_t __riscv_vandn_vx_u8m1 (vuint8m1_t vs2, uint8_t rs1, size_t vl); +vuint8m2_t __riscv_vandn_vv_u8m2 (vuint8m2_t vs2, vuint8m2_t vs1, size_t vl); +vuint8m2_t __riscv_vandn_vx_u8m2 (vuint8m2_t vs2, uint8_t rs1, size_t vl); +vuint8m4_t __riscv_vandn_vv_u8m4 (vuint8m4_t vs2, vuint8m4_t vs1, size_t vl); +vuint8m4_t __riscv_vandn_vx_u8m4 (vuint8m4_t vs2, uint8_t rs1, size_t vl); +vuint8m8_t __riscv_vandn_vv_u8m8 (vuint8m8_t vs2, vuint8m8_t vs1, size_t vl); +vuint8m8_t __riscv_vandn_vx_u8m8 (vuint8m8_t vs2, uint8_t rs1, size_t vl); +vuint16mf4_t __riscv_vandn_vv_u16mf4 (vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl); +vuint16mf4_t __riscv_vandn_vx_u16mf4 (vuint16mf4_t vs2, uint16_t rs1, size_t vl); +vuint16mf2_t __riscv_vandn_vv_u16mf2 (vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl); +vuint16mf2_t __riscv_vandn_vx_u16mf2 (vuint16mf2_t vs2, uint16_t rs1, size_t vl); +vuint16m1_t __riscv_vandn_vv_u16m1 (vuint16m1_t vs2, vuint16m1_t vs1, size_t vl); +vuint16m1_t __riscv_vandn_vx_u16m1 (vuint16m1_t vs2, uint16_t rs1, size_t vl); +vuint16m2_t __riscv_vandn_vv_u16m2 (vuint16m2_t vs2, vuint16m2_t vs1, size_t vl); +vuint16m2_t __riscv_vandn_vx_u16m2 (vuint16m2_t vs2, uint16_t rs1, size_t vl); +vuint16m4_t __riscv_vandn_vv_u16m4 (vuint16m4_t vs2, vuint16m4_t vs1, size_t vl); +vuint16m4_t __riscv_vandn_vx_u16m4 (vuint16m4_t vs2, uint16_t rs1, size_t vl); +vuint16m8_t __riscv_vandn_vv_u16m8 (vuint16m8_t vs2, vuint16m8_t vs1, size_t vl); +vuint16m8_t __riscv_vandn_vx_u16m8 (vuint16m8_t vs2, uint16_t rs1, size_t vl); +vuint32mf2_t __riscv_vandn_vv_u32mf2 (vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl); +vuint32mf2_t __riscv_vandn_vx_u32mf2 (vuint32mf2_t vs2, uint32_t rs1, size_t vl); +vuint32m1_t __riscv_vandn_vv_u32m1 (vuint32m1_t vs2, vuint32m1_t vs1, size_t vl); +vuint32m1_t __riscv_vandn_vx_u32m1 (vuint32m1_t vs2, uint32_t rs1, size_t vl); +vuint32m2_t __riscv_vandn_vv_u32m2 (vuint32m2_t vs2, vuint32m2_t vs1, size_t vl); +vuint32m2_t __riscv_vandn_vx_u32m2 (vuint32m2_t vs2, uint32_t rs1, size_t vl); +vuint32m4_t __riscv_vandn_vv_u32m4 (vuint32m4_t vs2, vuint32m4_t vs1, size_t vl); +vuint32m4_t __riscv_vandn_vx_u32m4 (vuint32m4_t vs2, uint32_t rs1, size_t vl); +vuint32m8_t __riscv_vandn_vv_u32m8 (vuint32m8_t vs2, vuint32m8_t vs1, size_t vl); +vuint32m8_t __riscv_vandn_vx_u32m8 (vuint32m8_t vs2, uint32_t rs1, size_t vl); +vuint64m1_t __riscv_vandn_vv_u64m1 (vuint64m1_t vs2, vuint64m1_t vs1, size_t vl); +vuint64m1_t __riscv_vandn_vx_u64m1 (vuint64m1_t vs2, uint64_t rs1, size_t vl); +vuint64m2_t __riscv_vandn_vv_u64m2 (vuint64m2_t vs2, vuint64m2_t vs1, size_t vl); +vuint64m2_t __riscv_vandn_vx_u64m2 (vuint64m2_t vs2, uint64_t rs1, size_t vl); +vuint64m4_t __riscv_vandn_vv_u64m4 (vuint64m4_t vs2, vuint64m4_t vs1, size_t vl); +vuint64m4_t __riscv_vandn_vx_u64m4 (vuint64m4_t vs2, uint64_t rs1, size_t vl); +vuint64m8_t __riscv_vandn_vv_u64m8 (vuint64m8_t vs2, vuint64m8_t vs1, size_t vl); +vuint64m8_t __riscv_vandn_vx_u64m8 (vuint64m8_t vs2, uint64_t rs1, size_t vl); +// masked functions +vuint8mf8_t __riscv_vandn_vv_u8mf8_m (vbool64_t vm, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl); +vuint8mf8_t __riscv_vandn_vx_u8mf8_m (vbool64_t vm, vuint8mf8_t vs2, uint8_t rs1, size_t vl); +vuint8mf4_t __riscv_vandn_vv_u8mf4_m (vbool32_t 
vm, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl); +vuint8mf4_t __riscv_vandn_vx_u8mf4_m (vbool32_t vm, vuint8mf4_t vs2, uint8_t rs1, size_t vl); +vuint8mf2_t __riscv_vandn_vv_u8mf2_m (vbool16_t vm, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl); +vuint8mf2_t __riscv_vandn_vx_u8mf2_m (vbool16_t vm, vuint8mf2_t vs2, uint8_t rs1, size_t vl); +vuint8m1_t __riscv_vandn_vv_u8m1_m (vbool8_t vm, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl); +vuint8m1_t __riscv_vandn_vx_u8m1_m (vbool8_t vm, vuint8m1_t vs2, uint8_t rs1, size_t vl); +vuint8m2_t __riscv_vandn_vv_u8m2_m (vbool4_t vm, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl); +vuint8m2_t __riscv_vandn_vx_u8m2_m (vbool4_t vm, vuint8m2_t vs2, uint8_t rs1, size_t vl); +vuint8m4_t __riscv_vandn_vv_u8m4_m (vbool2_t vm, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl); +vuint8m4_t __riscv_vandn_vx_u8m4_m (vbool2_t vm, vuint8m4_t vs2, uint8_t rs1, size_t vl); +vuint8m8_t __riscv_vandn_vv_u8m8_m (vbool1_t vm, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl); +vuint8m8_t __riscv_vandn_vx_u8m8_m (vbool1_t vm, vuint8m8_t vs2, uint8_t rs1, size_t vl); +vuint16mf4_t __riscv_vandn_vv_u16mf4_m (vbool64_t vm, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl); +vuint16mf4_t __riscv_vandn_vx_u16mf4_m (vbool64_t vm, vuint16mf4_t vs2, uint16_t rs1, size_t vl); +vuint16mf2_t __riscv_vandn_vv_u16mf2_m (vbool32_t vm, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl); +vuint16mf2_t __riscv_vandn_vx_u16mf2_m (vbool32_t vm, vuint16mf2_t vs2, uint16_t rs1, size_t vl); +vuint16m1_t __riscv_vandn_vv_u16m1_m (vbool16_t vm, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl); +vuint16m1_t __riscv_vandn_vx_u16m1_m (vbool16_t vm, vuint16m1_t vs2, uint16_t rs1, size_t vl); +vuint16m2_t __riscv_vandn_vv_u16m2_m (vbool8_t vm, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl); +vuint16m2_t __riscv_vandn_vx_u16m2_m (vbool8_t vm, vuint16m2_t vs2, uint16_t rs1, size_t vl); +vuint16m4_t __riscv_vandn_vv_u16m4_m (vbool4_t vm, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl); +vuint16m4_t __riscv_vandn_vx_u16m4_m (vbool4_t vm, vuint16m4_t vs2, uint16_t rs1, size_t vl); +vuint16m8_t __riscv_vandn_vv_u16m8_m (vbool2_t vm, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl); +vuint16m8_t __riscv_vandn_vx_u16m8_m (vbool2_t vm, vuint16m8_t vs2, uint16_t rs1, size_t vl); +vuint32mf2_t __riscv_vandn_vv_u32mf2_m (vbool64_t vm, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl); +vuint32mf2_t __riscv_vandn_vx_u32mf2_m (vbool64_t vm, vuint32mf2_t vs2, uint32_t rs1, size_t vl); +vuint32m1_t __riscv_vandn_vv_u32m1_m (vbool32_t vm, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl); +vuint32m1_t __riscv_vandn_vx_u32m1_m (vbool32_t vm, vuint32m1_t vs2, uint32_t rs1, size_t vl); +vuint32m2_t __riscv_vandn_vv_u32m2_m (vbool16_t vm, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl); +vuint32m2_t __riscv_vandn_vx_u32m2_m (vbool16_t vm, vuint32m2_t vs2, uint32_t rs1, size_t vl); +vuint32m4_t __riscv_vandn_vv_u32m4_m (vbool8_t vm, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl); +vuint32m4_t __riscv_vandn_vx_u32m4_m (vbool8_t vm, vuint32m4_t vs2, uint32_t rs1, size_t vl); +vuint32m8_t __riscv_vandn_vv_u32m8_m (vbool4_t vm, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl); +vuint32m8_t __riscv_vandn_vx_u32m8_m (vbool4_t vm, vuint32m8_t vs2, uint32_t rs1, size_t vl); +vuint64m1_t __riscv_vandn_vv_u64m1_m (vbool64_t vm, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl); +vuint64m1_t __riscv_vandn_vx_u64m1_m (vbool64_t vm, vuint64m1_t vs2, uint64_t rs1, size_t vl); +vuint64m2_t __riscv_vandn_vv_u64m2_m (vbool32_t vm, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl); +vuint64m2_t 
__riscv_vandn_vx_u64m2_m (vbool32_t vm, vuint64m2_t vs2, uint64_t rs1, size_t vl); +vuint64m4_t __riscv_vandn_vv_u64m4_m (vbool16_t vm, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl); +vuint64m4_t __riscv_vandn_vx_u64m4_m (vbool16_t vm, vuint64m4_t vs2, uint64_t rs1, size_t vl); +vuint64m8_t __riscv_vandn_vv_u64m8_m (vbool8_t vm, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl); +vuint64m8_t __riscv_vandn_vx_u64m8_m (vbool8_t vm, vuint64m8_t vs2, uint64_t rs1, size_t vl); +---- + +[[]] +==== Vector Basic Bit-manipulation - Reverse + +[,c] +---- +vuint8mf8_t __riscv_vbrev_v_u8mf8 (vuint8mf8_t vs2, size_t vl); +vuint8mf4_t __riscv_vbrev_v_u8mf4 (vuint8mf4_t vs2, size_t vl); +vuint8mf2_t __riscv_vbrev_v_u8mf2 (vuint8mf2_t vs2, size_t vl); +vuint8m1_t __riscv_vbrev_v_u8m1 (vuint8m1_t vs2, size_t vl); +vuint8m2_t __riscv_vbrev_v_u8m2 (vuint8m2_t vs2, size_t vl); +vuint8m4_t __riscv_vbrev_v_u8m4 (vuint8m4_t vs2, size_t vl); +vuint8m8_t __riscv_vbrev_v_u8m8 (vuint8m8_t vs2, size_t vl); +vuint16mf4_t __riscv_vbrev_v_u16mf4 (vuint16mf4_t vs2, size_t vl); +vuint16mf2_t __riscv_vbrev_v_u16mf2 (vuint16mf2_t vs2, size_t vl); +vuint16m1_t __riscv_vbrev_v_u16m1 (vuint16m1_t vs2, size_t vl); +vuint16m2_t __riscv_vbrev_v_u16m2 (vuint16m2_t vs2, size_t vl); +vuint16m4_t __riscv_vbrev_v_u16m4 (vuint16m4_t vs2, size_t vl); +vuint16m8_t __riscv_vbrev_v_u16m8 (vuint16m8_t vs2, size_t vl); +vuint32mf2_t __riscv_vbrev_v_u32mf2 (vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vbrev_v_u32m1 (vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vbrev_v_u32m2 (vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vbrev_v_u32m4 (vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vbrev_v_u32m8 (vuint32m8_t vs2, size_t vl); +vuint64m1_t __riscv_vbrev_v_u64m1 (vuint64m1_t vs2, size_t vl); +vuint64m2_t __riscv_vbrev_v_u64m2 (vuint64m2_t vs2, size_t vl); +vuint64m4_t __riscv_vbrev_v_u64m4 (vuint64m4_t vs2, size_t vl); +vuint64m8_t __riscv_vbrev_v_u64m8 (vuint64m8_t vs2, size_t vl); +vuint8mf8_t __riscv_vbrev8_v_u8mf8 (vuint8mf8_t vs2, size_t vl); +vuint8mf4_t __riscv_vbrev8_v_u8mf4 (vuint8mf4_t vs2, size_t vl); +vuint8mf2_t __riscv_vbrev8_v_u8mf2 (vuint8mf2_t vs2, size_t vl); +vuint8m1_t __riscv_vbrev8_v_u8m1 (vuint8m1_t vs2, size_t vl); +vuint8m2_t __riscv_vbrev8_v_u8m2 (vuint8m2_t vs2, size_t vl); +vuint8m4_t __riscv_vbrev8_v_u8m4 (vuint8m4_t vs2, size_t vl); +vuint8m8_t __riscv_vbrev8_v_u8m8 (vuint8m8_t vs2, size_t vl); +vuint16mf4_t __riscv_vbrev8_v_u16mf4 (vuint16mf4_t vs2, size_t vl); +vuint16mf2_t __riscv_vbrev8_v_u16mf2 (vuint16mf2_t vs2, size_t vl); +vuint16m1_t __riscv_vbrev8_v_u16m1 (vuint16m1_t vs2, size_t vl); +vuint16m2_t __riscv_vbrev8_v_u16m2 (vuint16m2_t vs2, size_t vl); +vuint16m4_t __riscv_vbrev8_v_u16m4 (vuint16m4_t vs2, size_t vl); +vuint16m8_t __riscv_vbrev8_v_u16m8 (vuint16m8_t vs2, size_t vl); +vuint32mf2_t __riscv_vbrev8_v_u32mf2 (vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vbrev8_v_u32m1 (vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vbrev8_v_u32m2 (vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vbrev8_v_u32m4 (vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vbrev8_v_u32m8 (vuint32m8_t vs2, size_t vl); +vuint64m1_t __riscv_vbrev8_v_u64m1 (vuint64m1_t vs2, size_t vl); +vuint64m2_t __riscv_vbrev8_v_u64m2 (vuint64m2_t vs2, size_t vl); +vuint64m4_t __riscv_vbrev8_v_u64m4 (vuint64m4_t vs2, size_t vl); +vuint64m8_t __riscv_vbrev8_v_u64m8 (vuint64m8_t vs2, size_t vl); +vuint8mf8_t __riscv_vrev8_v_u8mf8 (vuint8mf8_t vs2, size_t vl); +vuint8mf4_t __riscv_vrev8_v_u8mf4 (vuint8mf4_t vs2, size_t vl); 
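// --- Illustrative usage sketch (not part of the generated listing) ---
// vrev8 reverses the byte order inside each element, so on 32-bit elements it
// behaves like a vectorized byte swap (e.g. for endianness conversion). The
// helper below is hypothetical; it assumes <riscv_vector.h> plus the base RVV
// vsetvl/load/store intrinsics, with Zvbb enabled in the toolchain.
#include <riscv_vector.h>
#include <stddef.h>
#include <stdint.h>

static void bswap32_buffer(uint32_t *dst, const uint32_t *src, size_t n) {
  for (size_t vl; n > 0; n -= vl, src += vl, dst += vl) {
    vl = __riscv_vsetvl_e32m1(n);                    // elements handled this pass
    vuint32m1_t v = __riscv_vle32_v_u32m1(src, vl);  // load
    v = __riscv_vrev8_v_u32m1(v, vl);                // swap bytes within each element
    __riscv_vse32_v_u32m1(dst, v, vl);               // store
  }
}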
+vuint8mf2_t __riscv_vrev8_v_u8mf2 (vuint8mf2_t vs2, size_t vl); +vuint8m1_t __riscv_vrev8_v_u8m1 (vuint8m1_t vs2, size_t vl); +vuint8m2_t __riscv_vrev8_v_u8m2 (vuint8m2_t vs2, size_t vl); +vuint8m4_t __riscv_vrev8_v_u8m4 (vuint8m4_t vs2, size_t vl); +vuint8m8_t __riscv_vrev8_v_u8m8 (vuint8m8_t vs2, size_t vl); +vuint16mf4_t __riscv_vrev8_v_u16mf4 (vuint16mf4_t vs2, size_t vl); +vuint16mf2_t __riscv_vrev8_v_u16mf2 (vuint16mf2_t vs2, size_t vl); +vuint16m1_t __riscv_vrev8_v_u16m1 (vuint16m1_t vs2, size_t vl); +vuint16m2_t __riscv_vrev8_v_u16m2 (vuint16m2_t vs2, size_t vl); +vuint16m4_t __riscv_vrev8_v_u16m4 (vuint16m4_t vs2, size_t vl); +vuint16m8_t __riscv_vrev8_v_u16m8 (vuint16m8_t vs2, size_t vl); +vuint32mf2_t __riscv_vrev8_v_u32mf2 (vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vrev8_v_u32m1 (vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vrev8_v_u32m2 (vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vrev8_v_u32m4 (vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vrev8_v_u32m8 (vuint32m8_t vs2, size_t vl); +vuint64m1_t __riscv_vrev8_v_u64m1 (vuint64m1_t vs2, size_t vl); +vuint64m2_t __riscv_vrev8_v_u64m2 (vuint64m2_t vs2, size_t vl); +vuint64m4_t __riscv_vrev8_v_u64m4 (vuint64m4_t vs2, size_t vl); +vuint64m8_t __riscv_vrev8_v_u64m8 (vuint64m8_t vs2, size_t vl); +// masked functions +vuint8mf8_t __riscv_vbrev_v_u8mf8_m (vbool64_t vm, vuint8mf8_t vs2, size_t vl); +vuint8mf4_t __riscv_vbrev_v_u8mf4_m (vbool32_t vm, vuint8mf4_t vs2, size_t vl); +vuint8mf2_t __riscv_vbrev_v_u8mf2_m (vbool16_t vm, vuint8mf2_t vs2, size_t vl); +vuint8m1_t __riscv_vbrev_v_u8m1_m (vbool8_t vm, vuint8m1_t vs2, size_t vl); +vuint8m2_t __riscv_vbrev_v_u8m2_m (vbool4_t vm, vuint8m2_t vs2, size_t vl); +vuint8m4_t __riscv_vbrev_v_u8m4_m (vbool2_t vm, vuint8m4_t vs2, size_t vl); +vuint8m8_t __riscv_vbrev_v_u8m8_m (vbool1_t vm, vuint8m8_t vs2, size_t vl); +vuint16mf4_t __riscv_vbrev_v_u16mf4_m (vbool64_t vm, vuint16mf4_t vs2, size_t vl); +vuint16mf2_t __riscv_vbrev_v_u16mf2_m (vbool32_t vm, vuint16mf2_t vs2, size_t vl); +vuint16m1_t __riscv_vbrev_v_u16m1_m (vbool16_t vm, vuint16m1_t vs2, size_t vl); +vuint16m2_t __riscv_vbrev_v_u16m2_m (vbool8_t vm, vuint16m2_t vs2, size_t vl); +vuint16m4_t __riscv_vbrev_v_u16m4_m (vbool4_t vm, vuint16m4_t vs2, size_t vl); +vuint16m8_t __riscv_vbrev_v_u16m8_m (vbool2_t vm, vuint16m8_t vs2, size_t vl); +vuint32mf2_t __riscv_vbrev_v_u32mf2_m (vbool64_t vm, vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vbrev_v_u32m1_m (vbool32_t vm, vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vbrev_v_u32m2_m (vbool16_t vm, vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vbrev_v_u32m4_m (vbool8_t vm, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vbrev_v_u32m8_m (vbool4_t vm, vuint32m8_t vs2, size_t vl); +vuint64m1_t __riscv_vbrev_v_u64m1_m (vbool64_t vm, vuint64m1_t vs2, size_t vl); +vuint64m2_t __riscv_vbrev_v_u64m2_m (vbool32_t vm, vuint64m2_t vs2, size_t vl); +vuint64m4_t __riscv_vbrev_v_u64m4_m (vbool16_t vm, vuint64m4_t vs2, size_t vl); +vuint64m8_t __riscv_vbrev_v_u64m8_m (vbool8_t vm, vuint64m8_t vs2, size_t vl); +vuint8mf8_t __riscv_vbrev8_v_u8mf8_m (vbool64_t vm, vuint8mf8_t vs2, size_t vl); +vuint8mf4_t __riscv_vbrev8_v_u8mf4_m (vbool32_t vm, vuint8mf4_t vs2, size_t vl); +vuint8mf2_t __riscv_vbrev8_v_u8mf2_m (vbool16_t vm, vuint8mf2_t vs2, size_t vl); +vuint8m1_t __riscv_vbrev8_v_u8m1_m (vbool8_t vm, vuint8m1_t vs2, size_t vl); +vuint8m2_t __riscv_vbrev8_v_u8m2_m (vbool4_t vm, vuint8m2_t vs2, size_t vl); +vuint8m4_t __riscv_vbrev8_v_u8m4_m (vbool2_t vm, vuint8m4_t vs2, 
size_t vl); +vuint8m8_t __riscv_vbrev8_v_u8m8_m (vbool1_t vm, vuint8m8_t vs2, size_t vl); +vuint16mf4_t __riscv_vbrev8_v_u16mf4_m (vbool64_t vm, vuint16mf4_t vs2, size_t vl); +vuint16mf2_t __riscv_vbrev8_v_u16mf2_m (vbool32_t vm, vuint16mf2_t vs2, size_t vl); +vuint16m1_t __riscv_vbrev8_v_u16m1_m (vbool16_t vm, vuint16m1_t vs2, size_t vl); +vuint16m2_t __riscv_vbrev8_v_u16m2_m (vbool8_t vm, vuint16m2_t vs2, size_t vl); +vuint16m4_t __riscv_vbrev8_v_u16m4_m (vbool4_t vm, vuint16m4_t vs2, size_t vl); +vuint16m8_t __riscv_vbrev8_v_u16m8_m (vbool2_t vm, vuint16m8_t vs2, size_t vl); +vuint32mf2_t __riscv_vbrev8_v_u32mf2_m (vbool64_t vm, vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vbrev8_v_u32m1_m (vbool32_t vm, vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vbrev8_v_u32m2_m (vbool16_t vm, vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vbrev8_v_u32m4_m (vbool8_t vm, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vbrev8_v_u32m8_m (vbool4_t vm, vuint32m8_t vs2, size_t vl); +vuint64m1_t __riscv_vbrev8_v_u64m1_m (vbool64_t vm, vuint64m1_t vs2, size_t vl); +vuint64m2_t __riscv_vbrev8_v_u64m2_m (vbool32_t vm, vuint64m2_t vs2, size_t vl); +vuint64m4_t __riscv_vbrev8_v_u64m4_m (vbool16_t vm, vuint64m4_t vs2, size_t vl); +vuint64m8_t __riscv_vbrev8_v_u64m8_m (vbool8_t vm, vuint64m8_t vs2, size_t vl); +vuint8mf8_t __riscv_vrev8_v_u8mf8_m (vbool64_t vm, vuint8mf8_t vs2, size_t vl); +vuint8mf4_t __riscv_vrev8_v_u8mf4_m (vbool32_t vm, vuint8mf4_t vs2, size_t vl); +vuint8mf2_t __riscv_vrev8_v_u8mf2_m (vbool16_t vm, vuint8mf2_t vs2, size_t vl); +vuint8m1_t __riscv_vrev8_v_u8m1_m (vbool8_t vm, vuint8m1_t vs2, size_t vl); +vuint8m2_t __riscv_vrev8_v_u8m2_m (vbool4_t vm, vuint8m2_t vs2, size_t vl); +vuint8m4_t __riscv_vrev8_v_u8m4_m (vbool2_t vm, vuint8m4_t vs2, size_t vl); +vuint8m8_t __riscv_vrev8_v_u8m8_m (vbool1_t vm, vuint8m8_t vs2, size_t vl); +vuint16mf4_t __riscv_vrev8_v_u16mf4_m (vbool64_t vm, vuint16mf4_t vs2, size_t vl); +vuint16mf2_t __riscv_vrev8_v_u16mf2_m (vbool32_t vm, vuint16mf2_t vs2, size_t vl); +vuint16m1_t __riscv_vrev8_v_u16m1_m (vbool16_t vm, vuint16m1_t vs2, size_t vl); +vuint16m2_t __riscv_vrev8_v_u16m2_m (vbool8_t vm, vuint16m2_t vs2, size_t vl); +vuint16m4_t __riscv_vrev8_v_u16m4_m (vbool4_t vm, vuint16m4_t vs2, size_t vl); +vuint16m8_t __riscv_vrev8_v_u16m8_m (vbool2_t vm, vuint16m8_t vs2, size_t vl); +vuint32mf2_t __riscv_vrev8_v_u32mf2_m (vbool64_t vm, vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vrev8_v_u32m1_m (vbool32_t vm, vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vrev8_v_u32m2_m (vbool16_t vm, vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vrev8_v_u32m4_m (vbool8_t vm, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vrev8_v_u32m8_m (vbool4_t vm, vuint32m8_t vs2, size_t vl); +vuint64m1_t __riscv_vrev8_v_u64m1_m (vbool64_t vm, vuint64m1_t vs2, size_t vl); +vuint64m2_t __riscv_vrev8_v_u64m2_m (vbool32_t vm, vuint64m2_t vs2, size_t vl); +vuint64m4_t __riscv_vrev8_v_u64m4_m (vbool16_t vm, vuint64m4_t vs2, size_t vl); +vuint64m8_t __riscv_vrev8_v_u64m8_m (vbool8_t vm, vuint64m8_t vs2, size_t vl); +---- + +[[]] +==== Vector Basic Bit-manipulation - Count Bits + +[,c] +---- +vuint8mf8_t __riscv_vclz_v_u8mf8 (vuint8mf8_t vs2, size_t vl); +vuint8mf4_t __riscv_vclz_v_u8mf4 (vuint8mf4_t vs2, size_t vl); +vuint8mf2_t __riscv_vclz_v_u8mf2 (vuint8mf2_t vs2, size_t vl); +vuint8m1_t __riscv_vclz_v_u8m1 (vuint8m1_t vs2, size_t vl); +vuint8m2_t __riscv_vclz_v_u8m2 (vuint8m2_t vs2, size_t vl); +vuint8m4_t __riscv_vclz_v_u8m4 (vuint8m4_t vs2, size_t vl); 
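// --- Illustrative usage sketch (not part of the generated listing) ---
// For nonzero 32-bit elements, floor(log2(x)) == 31 - clz(x). The hypothetical
// helper below combines vclz with the base RVV intrinsics (vsetvl, loads,
// stores, vrsub) from <riscv_vector.h>; zero inputs are not handled here.
#include <riscv_vector.h>
#include <stddef.h>
#include <stdint.h>

static void ilog2_u32(uint32_t *dst, const uint32_t *src, size_t n) {
  for (size_t vl; n > 0; n -= vl, src += vl, dst += vl) {
    vl = __riscv_vsetvl_e32m1(n);
    vuint32m1_t v = __riscv_vle32_v_u32m1(src, vl);
    vuint32m1_t clz = __riscv_vclz_v_u32m1(v, vl);         // leading zeros per element
    vuint32m1_t lg = __riscv_vrsub_vx_u32m1(clz, 31, vl);  // 31 - clz
    __riscv_vse32_v_u32m1(dst, lg, vl);
  }
}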
+vuint8m8_t __riscv_vclz_v_u8m8 (vuint8m8_t vs2, size_t vl); +vuint16mf4_t __riscv_vclz_v_u16mf4 (vuint16mf4_t vs2, size_t vl); +vuint16mf2_t __riscv_vclz_v_u16mf2 (vuint16mf2_t vs2, size_t vl); +vuint16m1_t __riscv_vclz_v_u16m1 (vuint16m1_t vs2, size_t vl); +vuint16m2_t __riscv_vclz_v_u16m2 (vuint16m2_t vs2, size_t vl); +vuint16m4_t __riscv_vclz_v_u16m4 (vuint16m4_t vs2, size_t vl); +vuint16m8_t __riscv_vclz_v_u16m8 (vuint16m8_t vs2, size_t vl); +vuint32mf2_t __riscv_vclz_v_u32mf2 (vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vclz_v_u32m1 (vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vclz_v_u32m2 (vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vclz_v_u32m4 (vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vclz_v_u32m8 (vuint32m8_t vs2, size_t vl); +vuint64m1_t __riscv_vclz_v_u64m1 (vuint64m1_t vs2, size_t vl); +vuint64m2_t __riscv_vclz_v_u64m2 (vuint64m2_t vs2, size_t vl); +vuint64m4_t __riscv_vclz_v_u64m4 (vuint64m4_t vs2, size_t vl); +vuint64m8_t __riscv_vclz_v_u64m8 (vuint64m8_t vs2, size_t vl); +vuint8mf8_t __riscv_vctz_v_u8mf8 (vuint8mf8_t vs2, size_t vl); +vuint8mf4_t __riscv_vctz_v_u8mf4 (vuint8mf4_t vs2, size_t vl); +vuint8mf2_t __riscv_vctz_v_u8mf2 (vuint8mf2_t vs2, size_t vl); +vuint8m1_t __riscv_vctz_v_u8m1 (vuint8m1_t vs2, size_t vl); +vuint8m2_t __riscv_vctz_v_u8m2 (vuint8m2_t vs2, size_t vl); +vuint8m4_t __riscv_vctz_v_u8m4 (vuint8m4_t vs2, size_t vl); +vuint8m8_t __riscv_vctz_v_u8m8 (vuint8m8_t vs2, size_t vl); +vuint16mf4_t __riscv_vctz_v_u16mf4 (vuint16mf4_t vs2, size_t vl); +vuint16mf2_t __riscv_vctz_v_u16mf2 (vuint16mf2_t vs2, size_t vl); +vuint16m1_t __riscv_vctz_v_u16m1 (vuint16m1_t vs2, size_t vl); +vuint16m2_t __riscv_vctz_v_u16m2 (vuint16m2_t vs2, size_t vl); +vuint16m4_t __riscv_vctz_v_u16m4 (vuint16m4_t vs2, size_t vl); +vuint16m8_t __riscv_vctz_v_u16m8 (vuint16m8_t vs2, size_t vl); +vuint32mf2_t __riscv_vctz_v_u32mf2 (vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vctz_v_u32m1 (vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vctz_v_u32m2 (vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vctz_v_u32m4 (vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vctz_v_u32m8 (vuint32m8_t vs2, size_t vl); +vuint64m1_t __riscv_vctz_v_u64m1 (vuint64m1_t vs2, size_t vl); +vuint64m2_t __riscv_vctz_v_u64m2 (vuint64m2_t vs2, size_t vl); +vuint64m4_t __riscv_vctz_v_u64m4 (vuint64m4_t vs2, size_t vl); +vuint64m8_t __riscv_vctz_v_u64m8 (vuint64m8_t vs2, size_t vl); +// masked functions +vuint8mf8_t __riscv_vclz_v_u8mf8_m (vbool64_t vm, vuint8mf8_t vs2, size_t vl); +vuint8mf4_t __riscv_vclz_v_u8mf4_m (vbool32_t vm, vuint8mf4_t vs2, size_t vl); +vuint8mf2_t __riscv_vclz_v_u8mf2_m (vbool16_t vm, vuint8mf2_t vs2, size_t vl); +vuint8m1_t __riscv_vclz_v_u8m1_m (vbool8_t vm, vuint8m1_t vs2, size_t vl); +vuint8m2_t __riscv_vclz_v_u8m2_m (vbool4_t vm, vuint8m2_t vs2, size_t vl); +vuint8m4_t __riscv_vclz_v_u8m4_m (vbool2_t vm, vuint8m4_t vs2, size_t vl); +vuint8m8_t __riscv_vclz_v_u8m8_m (vbool1_t vm, vuint8m8_t vs2, size_t vl); +vuint16mf4_t __riscv_vclz_v_u16mf4_m (vbool64_t vm, vuint16mf4_t vs2, size_t vl); +vuint16mf2_t __riscv_vclz_v_u16mf2_m (vbool32_t vm, vuint16mf2_t vs2, size_t vl); +vuint16m1_t __riscv_vclz_v_u16m1_m (vbool16_t vm, vuint16m1_t vs2, size_t vl); +vuint16m2_t __riscv_vclz_v_u16m2_m (vbool8_t vm, vuint16m2_t vs2, size_t vl); +vuint16m4_t __riscv_vclz_v_u16m4_m (vbool4_t vm, vuint16m4_t vs2, size_t vl); +vuint16m8_t __riscv_vclz_v_u16m8_m (vbool2_t vm, vuint16m8_t vs2, size_t vl); +vuint32mf2_t __riscv_vclz_v_u32mf2_m (vbool64_t vm, vuint32mf2_t vs2, 
size_t vl); +vuint32m1_t __riscv_vclz_v_u32m1_m (vbool32_t vm, vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vclz_v_u32m2_m (vbool16_t vm, vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vclz_v_u32m4_m (vbool8_t vm, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vclz_v_u32m8_m (vbool4_t vm, vuint32m8_t vs2, size_t vl); +vuint64m1_t __riscv_vclz_v_u64m1_m (vbool64_t vm, vuint64m1_t vs2, size_t vl); +vuint64m2_t __riscv_vclz_v_u64m2_m (vbool32_t vm, vuint64m2_t vs2, size_t vl); +vuint64m4_t __riscv_vclz_v_u64m4_m (vbool16_t vm, vuint64m4_t vs2, size_t vl); +vuint64m8_t __riscv_vclz_v_u64m8_m (vbool8_t vm, vuint64m8_t vs2, size_t vl); +vuint8mf8_t __riscv_vctz_v_u8mf8_m (vbool64_t vm, vuint8mf8_t vs2, size_t vl); +vuint8mf4_t __riscv_vctz_v_u8mf4_m (vbool32_t vm, vuint8mf4_t vs2, size_t vl); +vuint8mf2_t __riscv_vctz_v_u8mf2_m (vbool16_t vm, vuint8mf2_t vs2, size_t vl); +vuint8m1_t __riscv_vctz_v_u8m1_m (vbool8_t vm, vuint8m1_t vs2, size_t vl); +vuint8m2_t __riscv_vctz_v_u8m2_m (vbool4_t vm, vuint8m2_t vs2, size_t vl); +vuint8m4_t __riscv_vctz_v_u8m4_m (vbool2_t vm, vuint8m4_t vs2, size_t vl); +vuint8m8_t __riscv_vctz_v_u8m8_m (vbool1_t vm, vuint8m8_t vs2, size_t vl); +vuint16mf4_t __riscv_vctz_v_u16mf4_m (vbool64_t vm, vuint16mf4_t vs2, size_t vl); +vuint16mf2_t __riscv_vctz_v_u16mf2_m (vbool32_t vm, vuint16mf2_t vs2, size_t vl); +vuint16m1_t __riscv_vctz_v_u16m1_m (vbool16_t vm, vuint16m1_t vs2, size_t vl); +vuint16m2_t __riscv_vctz_v_u16m2_m (vbool8_t vm, vuint16m2_t vs2, size_t vl); +vuint16m4_t __riscv_vctz_v_u16m4_m (vbool4_t vm, vuint16m4_t vs2, size_t vl); +vuint16m8_t __riscv_vctz_v_u16m8_m (vbool2_t vm, vuint16m8_t vs2, size_t vl); +vuint32mf2_t __riscv_vctz_v_u32mf2_m (vbool64_t vm, vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vctz_v_u32m1_m (vbool32_t vm, vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vctz_v_u32m2_m (vbool16_t vm, vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vctz_v_u32m4_m (vbool8_t vm, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vctz_v_u32m8_m (vbool4_t vm, vuint32m8_t vs2, size_t vl); +vuint64m1_t __riscv_vctz_v_u64m1_m (vbool64_t vm, vuint64m1_t vs2, size_t vl); +vuint64m2_t __riscv_vctz_v_u64m2_m (vbool32_t vm, vuint64m2_t vs2, size_t vl); +vuint64m4_t __riscv_vctz_v_u64m4_m (vbool16_t vm, vuint64m4_t vs2, size_t vl); +vuint64m8_t __riscv_vctz_v_u64m8_m (vbool8_t vm, vuint64m8_t vs2, size_t vl); +---- + +[[]] +==== Vector Basic Bit-manipulation - Vector Population Count + +[,c] +---- +vuint8mf8_t __riscv_vcpop_v_u8mf8 (vuint8mf8_t vs2, size_t vl); +vuint8mf4_t __riscv_vcpop_v_u8mf4 (vuint8mf4_t vs2, size_t vl); +vuint8mf2_t __riscv_vcpop_v_u8mf2 (vuint8mf2_t vs2, size_t vl); +vuint8m1_t __riscv_vcpop_v_u8m1 (vuint8m1_t vs2, size_t vl); +vuint8m2_t __riscv_vcpop_v_u8m2 (vuint8m2_t vs2, size_t vl); +vuint8m4_t __riscv_vcpop_v_u8m4 (vuint8m4_t vs2, size_t vl); +vuint8m8_t __riscv_vcpop_v_u8m8 (vuint8m8_t vs2, size_t vl); +vuint16mf4_t __riscv_vcpop_v_u16mf4 (vuint16mf4_t vs2, size_t vl); +vuint16mf2_t __riscv_vcpop_v_u16mf2 (vuint16mf2_t vs2, size_t vl); +vuint16m1_t __riscv_vcpop_v_u16m1 (vuint16m1_t vs2, size_t vl); +vuint16m2_t __riscv_vcpop_v_u16m2 (vuint16m2_t vs2, size_t vl); +vuint16m4_t __riscv_vcpop_v_u16m4 (vuint16m4_t vs2, size_t vl); +vuint16m8_t __riscv_vcpop_v_u16m8 (vuint16m8_t vs2, size_t vl); +vuint32mf2_t __riscv_vcpop_v_u32mf2 (vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vcpop_v_u32m1 (vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vcpop_v_u32m2 (vuint32m2_t vs2, size_t vl); +vuint32m4_t 
__riscv_vcpop_v_u32m4 (vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vcpop_v_u32m8 (vuint32m8_t vs2, size_t vl); +vuint64m1_t __riscv_vcpop_v_u64m1 (vuint64m1_t vs2, size_t vl); +vuint64m2_t __riscv_vcpop_v_u64m2 (vuint64m2_t vs2, size_t vl); +vuint64m4_t __riscv_vcpop_v_u64m4 (vuint64m4_t vs2, size_t vl); +vuint64m8_t __riscv_vcpop_v_u64m8 (vuint64m8_t vs2, size_t vl); +// masked functions +vuint8mf8_t __riscv_vcpop_v_u8mf8_m (vbool64_t vm, vuint8mf8_t vs2, size_t vl); +vuint8mf4_t __riscv_vcpop_v_u8mf4_m (vbool32_t vm, vuint8mf4_t vs2, size_t vl); +vuint8mf2_t __riscv_vcpop_v_u8mf2_m (vbool16_t vm, vuint8mf2_t vs2, size_t vl); +vuint8m1_t __riscv_vcpop_v_u8m1_m (vbool8_t vm, vuint8m1_t vs2, size_t vl); +vuint8m2_t __riscv_vcpop_v_u8m2_m (vbool4_t vm, vuint8m2_t vs2, size_t vl); +vuint8m4_t __riscv_vcpop_v_u8m4_m (vbool2_t vm, vuint8m4_t vs2, size_t vl); +vuint8m8_t __riscv_vcpop_v_u8m8_m (vbool1_t vm, vuint8m8_t vs2, size_t vl); +vuint16mf4_t __riscv_vcpop_v_u16mf4_m (vbool64_t vm, vuint16mf4_t vs2, size_t vl); +vuint16mf2_t __riscv_vcpop_v_u16mf2_m (vbool32_t vm, vuint16mf2_t vs2, size_t vl); +vuint16m1_t __riscv_vcpop_v_u16m1_m (vbool16_t vm, vuint16m1_t vs2, size_t vl); +vuint16m2_t __riscv_vcpop_v_u16m2_m (vbool8_t vm, vuint16m2_t vs2, size_t vl); +vuint16m4_t __riscv_vcpop_v_u16m4_m (vbool4_t vm, vuint16m4_t vs2, size_t vl); +vuint16m8_t __riscv_vcpop_v_u16m8_m (vbool2_t vm, vuint16m8_t vs2, size_t vl); +vuint32mf2_t __riscv_vcpop_v_u32mf2_m (vbool64_t vm, vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vcpop_v_u32m1_m (vbool32_t vm, vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vcpop_v_u32m2_m (vbool16_t vm, vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vcpop_v_u32m4_m (vbool8_t vm, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vcpop_v_u32m8_m (vbool4_t vm, vuint32m8_t vs2, size_t vl); +vuint64m1_t __riscv_vcpop_v_u64m1_m (vbool64_t vm, vuint64m1_t vs2, size_t vl); +vuint64m2_t __riscv_vcpop_v_u64m2_m (vbool32_t vm, vuint64m2_t vs2, size_t vl); +vuint64m4_t __riscv_vcpop_v_u64m4_m (vbool16_t vm, vuint64m4_t vs2, size_t vl); +vuint64m8_t __riscv_vcpop_v_u64m8_m (vbool8_t vm, vuint64m8_t vs2, size_t vl); +---- + +[[]] +==== Vector Bit-manipulation used in Cryptography - Rotate + +[,c] +---- +vuint8mf8_t __riscv_vrol_vv_u8mf8 (vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl); +vuint8mf8_t __riscv_vrol_vx_u8mf8 (vuint8mf8_t vs2, size_t rs1, size_t vl); +vuint8mf4_t __riscv_vrol_vv_u8mf4 (vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl); +vuint8mf4_t __riscv_vrol_vx_u8mf4 (vuint8mf4_t vs2, size_t rs1, size_t vl); +vuint8mf2_t __riscv_vrol_vv_u8mf2 (vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl); +vuint8mf2_t __riscv_vrol_vx_u8mf2 (vuint8mf2_t vs2, size_t rs1, size_t vl); +vuint8m1_t __riscv_vrol_vv_u8m1 (vuint8m1_t vs2, vuint8m1_t vs1, size_t vl); +vuint8m1_t __riscv_vrol_vx_u8m1 (vuint8m1_t vs2, size_t rs1, size_t vl); +vuint8m2_t __riscv_vrol_vv_u8m2 (vuint8m2_t vs2, vuint8m2_t vs1, size_t vl); +vuint8m2_t __riscv_vrol_vx_u8m2 (vuint8m2_t vs2, size_t rs1, size_t vl); +vuint8m4_t __riscv_vrol_vv_u8m4 (vuint8m4_t vs2, vuint8m4_t vs1, size_t vl); +vuint8m4_t __riscv_vrol_vx_u8m4 (vuint8m4_t vs2, size_t rs1, size_t vl); +vuint8m8_t __riscv_vrol_vv_u8m8 (vuint8m8_t vs2, vuint8m8_t vs1, size_t vl); +vuint8m8_t __riscv_vrol_vx_u8m8 (vuint8m8_t vs2, size_t rs1, size_t vl); +vuint16mf4_t __riscv_vrol_vv_u16mf4 (vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl); +vuint16mf4_t __riscv_vrol_vx_u16mf4 (vuint16mf4_t vs2, size_t rs1, size_t vl); +vuint16mf2_t __riscv_vrol_vv_u16mf2 
(vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl); +vuint16mf2_t __riscv_vrol_vx_u16mf2 (vuint16mf2_t vs2, size_t rs1, size_t vl); +vuint16m1_t __riscv_vrol_vv_u16m1 (vuint16m1_t vs2, vuint16m1_t vs1, size_t vl); +vuint16m1_t __riscv_vrol_vx_u16m1 (vuint16m1_t vs2, size_t rs1, size_t vl); +vuint16m2_t __riscv_vrol_vv_u16m2 (vuint16m2_t vs2, vuint16m2_t vs1, size_t vl); +vuint16m2_t __riscv_vrol_vx_u16m2 (vuint16m2_t vs2, size_t rs1, size_t vl); +vuint16m4_t __riscv_vrol_vv_u16m4 (vuint16m4_t vs2, vuint16m4_t vs1, size_t vl); +vuint16m4_t __riscv_vrol_vx_u16m4 (vuint16m4_t vs2, size_t rs1, size_t vl); +vuint16m8_t __riscv_vrol_vv_u16m8 (vuint16m8_t vs2, vuint16m8_t vs1, size_t vl); +vuint16m8_t __riscv_vrol_vx_u16m8 (vuint16m8_t vs2, size_t rs1, size_t vl); +vuint32mf2_t __riscv_vrol_vv_u32mf2 (vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl); +vuint32mf2_t __riscv_vrol_vx_u32mf2 (vuint32mf2_t vs2, size_t rs1, size_t vl); +vuint32m1_t __riscv_vrol_vv_u32m1 (vuint32m1_t vs2, vuint32m1_t vs1, size_t vl); +vuint32m1_t __riscv_vrol_vx_u32m1 (vuint32m1_t vs2, size_t rs1, size_t vl); +vuint32m2_t __riscv_vrol_vv_u32m2 (vuint32m2_t vs2, vuint32m2_t vs1, size_t vl); +vuint32m2_t __riscv_vrol_vx_u32m2 (vuint32m2_t vs2, size_t rs1, size_t vl); +vuint32m4_t __riscv_vrol_vv_u32m4 (vuint32m4_t vs2, vuint32m4_t vs1, size_t vl); +vuint32m4_t __riscv_vrol_vx_u32m4 (vuint32m4_t vs2, size_t rs1, size_t vl); +vuint32m8_t __riscv_vrol_vv_u32m8 (vuint32m8_t vs2, vuint32m8_t vs1, size_t vl); +vuint32m8_t __riscv_vrol_vx_u32m8 (vuint32m8_t vs2, size_t rs1, size_t vl); +vuint64m1_t __riscv_vrol_vv_u64m1 (vuint64m1_t vs2, vuint64m1_t vs1, size_t vl); +vuint64m1_t __riscv_vrol_vx_u64m1 (vuint64m1_t vs2, size_t rs1, size_t vl); +vuint64m2_t __riscv_vrol_vv_u64m2 (vuint64m2_t vs2, vuint64m2_t vs1, size_t vl); +vuint64m2_t __riscv_vrol_vx_u64m2 (vuint64m2_t vs2, size_t rs1, size_t vl); +vuint64m4_t __riscv_vrol_vv_u64m4 (vuint64m4_t vs2, vuint64m4_t vs1, size_t vl); +vuint64m4_t __riscv_vrol_vx_u64m4 (vuint64m4_t vs2, size_t rs1, size_t vl); +vuint64m8_t __riscv_vrol_vv_u64m8 (vuint64m8_t vs2, vuint64m8_t vs1, size_t vl); +vuint64m8_t __riscv_vrol_vx_u64m8 (vuint64m8_t vs2, size_t rs1, size_t vl); +vuint8mf8_t __riscv_vror_vv_u8mf8 (vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl); +vuint8mf8_t __riscv_vror_vx_u8mf8 (vuint8mf8_t vs2, size_t rs1, size_t vl); +vuint8mf4_t __riscv_vror_vv_u8mf4 (vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl); +vuint8mf4_t __riscv_vror_vx_u8mf4 (vuint8mf4_t vs2, size_t rs1, size_t vl); +vuint8mf2_t __riscv_vror_vv_u8mf2 (vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl); +vuint8mf2_t __riscv_vror_vx_u8mf2 (vuint8mf2_t vs2, size_t rs1, size_t vl); +vuint8m1_t __riscv_vror_vv_u8m1 (vuint8m1_t vs2, vuint8m1_t vs1, size_t vl); +vuint8m1_t __riscv_vror_vx_u8m1 (vuint8m1_t vs2, size_t rs1, size_t vl); +vuint8m2_t __riscv_vror_vv_u8m2 (vuint8m2_t vs2, vuint8m2_t vs1, size_t vl); +vuint8m2_t __riscv_vror_vx_u8m2 (vuint8m2_t vs2, size_t rs1, size_t vl); +vuint8m4_t __riscv_vror_vv_u8m4 (vuint8m4_t vs2, vuint8m4_t vs1, size_t vl); +vuint8m4_t __riscv_vror_vx_u8m4 (vuint8m4_t vs2, size_t rs1, size_t vl); +vuint8m8_t __riscv_vror_vv_u8m8 (vuint8m8_t vs2, vuint8m8_t vs1, size_t vl); +vuint8m8_t __riscv_vror_vx_u8m8 (vuint8m8_t vs2, size_t rs1, size_t vl); +vuint16mf4_t __riscv_vror_vv_u16mf4 (vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl); +vuint16mf4_t __riscv_vror_vx_u16mf4 (vuint16mf4_t vs2, size_t rs1, size_t vl); +vuint16mf2_t __riscv_vror_vv_u16mf2 (vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl); +vuint16mf2_t 
__riscv_vror_vx_u16mf2 (vuint16mf2_t vs2, size_t rs1, size_t vl); +vuint16m1_t __riscv_vror_vv_u16m1 (vuint16m1_t vs2, vuint16m1_t vs1, size_t vl); +vuint16m1_t __riscv_vror_vx_u16m1 (vuint16m1_t vs2, size_t rs1, size_t vl); +vuint16m2_t __riscv_vror_vv_u16m2 (vuint16m2_t vs2, vuint16m2_t vs1, size_t vl); +vuint16m2_t __riscv_vror_vx_u16m2 (vuint16m2_t vs2, size_t rs1, size_t vl); +vuint16m4_t __riscv_vror_vv_u16m4 (vuint16m4_t vs2, vuint16m4_t vs1, size_t vl); +vuint16m4_t __riscv_vror_vx_u16m4 (vuint16m4_t vs2, size_t rs1, size_t vl); +vuint16m8_t __riscv_vror_vv_u16m8 (vuint16m8_t vs2, vuint16m8_t vs1, size_t vl); +vuint16m8_t __riscv_vror_vx_u16m8 (vuint16m8_t vs2, size_t rs1, size_t vl); +vuint32mf2_t __riscv_vror_vv_u32mf2 (vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl); +vuint32mf2_t __riscv_vror_vx_u32mf2 (vuint32mf2_t vs2, size_t rs1, size_t vl); +vuint32m1_t __riscv_vror_vv_u32m1 (vuint32m1_t vs2, vuint32m1_t vs1, size_t vl); +vuint32m1_t __riscv_vror_vx_u32m1 (vuint32m1_t vs2, size_t rs1, size_t vl); +vuint32m2_t __riscv_vror_vv_u32m2 (vuint32m2_t vs2, vuint32m2_t vs1, size_t vl); +vuint32m2_t __riscv_vror_vx_u32m2 (vuint32m2_t vs2, size_t rs1, size_t vl); +vuint32m4_t __riscv_vror_vv_u32m4 (vuint32m4_t vs2, vuint32m4_t vs1, size_t vl); +vuint32m4_t __riscv_vror_vx_u32m4 (vuint32m4_t vs2, size_t rs1, size_t vl); +vuint32m8_t __riscv_vror_vv_u32m8 (vuint32m8_t vs2, vuint32m8_t vs1, size_t vl); +vuint32m8_t __riscv_vror_vx_u32m8 (vuint32m8_t vs2, size_t rs1, size_t vl); +vuint64m1_t __riscv_vror_vv_u64m1 (vuint64m1_t vs2, vuint64m1_t vs1, size_t vl); +vuint64m1_t __riscv_vror_vx_u64m1 (vuint64m1_t vs2, size_t rs1, size_t vl); +vuint64m2_t __riscv_vror_vv_u64m2 (vuint64m2_t vs2, vuint64m2_t vs1, size_t vl); +vuint64m2_t __riscv_vror_vx_u64m2 (vuint64m2_t vs2, size_t rs1, size_t vl); +vuint64m4_t __riscv_vror_vv_u64m4 (vuint64m4_t vs2, vuint64m4_t vs1, size_t vl); +vuint64m4_t __riscv_vror_vx_u64m4 (vuint64m4_t vs2, size_t rs1, size_t vl); +vuint64m8_t __riscv_vror_vv_u64m8 (vuint64m8_t vs2, vuint64m8_t vs1, size_t vl); +vuint64m8_t __riscv_vror_vx_u64m8 (vuint64m8_t vs2, size_t rs1, size_t vl); +// masked functions +vuint8mf8_t __riscv_vrol_vv_u8mf8_m (vbool64_t vm, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl); +vuint8mf8_t __riscv_vrol_vx_u8mf8_m (vbool64_t vm, vuint8mf8_t vs2, size_t rs1, size_t vl); +vuint8mf4_t __riscv_vrol_vv_u8mf4_m (vbool32_t vm, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl); +vuint8mf4_t __riscv_vrol_vx_u8mf4_m (vbool32_t vm, vuint8mf4_t vs2, size_t rs1, size_t vl); +vuint8mf2_t __riscv_vrol_vv_u8mf2_m (vbool16_t vm, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl); +vuint8mf2_t __riscv_vrol_vx_u8mf2_m (vbool16_t vm, vuint8mf2_t vs2, size_t rs1, size_t vl); +vuint8m1_t __riscv_vrol_vv_u8m1_m (vbool8_t vm, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl); +vuint8m1_t __riscv_vrol_vx_u8m1_m (vbool8_t vm, vuint8m1_t vs2, size_t rs1, size_t vl); +vuint8m2_t __riscv_vrol_vv_u8m2_m (vbool4_t vm, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl); +vuint8m2_t __riscv_vrol_vx_u8m2_m (vbool4_t vm, vuint8m2_t vs2, size_t rs1, size_t vl); +vuint8m4_t __riscv_vrol_vv_u8m4_m (vbool2_t vm, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl); +vuint8m4_t __riscv_vrol_vx_u8m4_m (vbool2_t vm, vuint8m4_t vs2, size_t rs1, size_t vl); +vuint8m8_t __riscv_vrol_vv_u8m8_m (vbool1_t vm, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl); +vuint8m8_t __riscv_vrol_vx_u8m8_m (vbool1_t vm, vuint8m8_t vs2, size_t rs1, size_t vl); +vuint16mf4_t __riscv_vrol_vv_u16mf4_m (vbool64_t vm, vuint16mf4_t vs2, vuint16mf4_t vs1, 
size_t vl); +vuint16mf4_t __riscv_vrol_vx_u16mf4_m (vbool64_t vm, vuint16mf4_t vs2, size_t rs1, size_t vl); +vuint16mf2_t __riscv_vrol_vv_u16mf2_m (vbool32_t vm, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl); +vuint16mf2_t __riscv_vrol_vx_u16mf2_m (vbool32_t vm, vuint16mf2_t vs2, size_t rs1, size_t vl); +vuint16m1_t __riscv_vrol_vv_u16m1_m (vbool16_t vm, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl); +vuint16m1_t __riscv_vrol_vx_u16m1_m (vbool16_t vm, vuint16m1_t vs2, size_t rs1, size_t vl); +vuint16m2_t __riscv_vrol_vv_u16m2_m (vbool8_t vm, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl); +vuint16m2_t __riscv_vrol_vx_u16m2_m (vbool8_t vm, vuint16m2_t vs2, size_t rs1, size_t vl); +vuint16m4_t __riscv_vrol_vv_u16m4_m (vbool4_t vm, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl); +vuint16m4_t __riscv_vrol_vx_u16m4_m (vbool4_t vm, vuint16m4_t vs2, size_t rs1, size_t vl); +vuint16m8_t __riscv_vrol_vv_u16m8_m (vbool2_t vm, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl); +vuint16m8_t __riscv_vrol_vx_u16m8_m (vbool2_t vm, vuint16m8_t vs2, size_t rs1, size_t vl); +vuint32mf2_t __riscv_vrol_vv_u32mf2_m (vbool64_t vm, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl); +vuint32mf2_t __riscv_vrol_vx_u32mf2_m (vbool64_t vm, vuint32mf2_t vs2, size_t rs1, size_t vl); +vuint32m1_t __riscv_vrol_vv_u32m1_m (vbool32_t vm, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl); +vuint32m1_t __riscv_vrol_vx_u32m1_m (vbool32_t vm, vuint32m1_t vs2, size_t rs1, size_t vl); +vuint32m2_t __riscv_vrol_vv_u32m2_m (vbool16_t vm, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl); +vuint32m2_t __riscv_vrol_vx_u32m2_m (vbool16_t vm, vuint32m2_t vs2, size_t rs1, size_t vl); +vuint32m4_t __riscv_vrol_vv_u32m4_m (vbool8_t vm, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl); +vuint32m4_t __riscv_vrol_vx_u32m4_m (vbool8_t vm, vuint32m4_t vs2, size_t rs1, size_t vl); +vuint32m8_t __riscv_vrol_vv_u32m8_m (vbool4_t vm, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl); +vuint32m8_t __riscv_vrol_vx_u32m8_m (vbool4_t vm, vuint32m8_t vs2, size_t rs1, size_t vl); +vuint64m1_t __riscv_vrol_vv_u64m1_m (vbool64_t vm, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl); +vuint64m1_t __riscv_vrol_vx_u64m1_m (vbool64_t vm, vuint64m1_t vs2, size_t rs1, size_t vl); +vuint64m2_t __riscv_vrol_vv_u64m2_m (vbool32_t vm, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl); +vuint64m2_t __riscv_vrol_vx_u64m2_m (vbool32_t vm, vuint64m2_t vs2, size_t rs1, size_t vl); +vuint64m4_t __riscv_vrol_vv_u64m4_m (vbool16_t vm, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl); +vuint64m4_t __riscv_vrol_vx_u64m4_m (vbool16_t vm, vuint64m4_t vs2, size_t rs1, size_t vl); +vuint64m8_t __riscv_vrol_vv_u64m8_m (vbool8_t vm, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl); +vuint64m8_t __riscv_vrol_vx_u64m8_m (vbool8_t vm, vuint64m8_t vs2, size_t rs1, size_t vl); +vuint8mf8_t __riscv_vror_vv_u8mf8_m (vbool64_t vm, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl); +vuint8mf8_t __riscv_vror_vx_u8mf8_m (vbool64_t vm, vuint8mf8_t vs2, size_t rs1, size_t vl); +vuint8mf4_t __riscv_vror_vv_u8mf4_m (vbool32_t vm, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl); +vuint8mf4_t __riscv_vror_vx_u8mf4_m (vbool32_t vm, vuint8mf4_t vs2, size_t rs1, size_t vl); +vuint8mf2_t __riscv_vror_vv_u8mf2_m (vbool16_t vm, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl); +vuint8mf2_t __riscv_vror_vx_u8mf2_m (vbool16_t vm, vuint8mf2_t vs2, size_t rs1, size_t vl); +vuint8m1_t __riscv_vror_vv_u8m1_m (vbool8_t vm, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl); +vuint8m1_t __riscv_vror_vx_u8m1_m (vbool8_t vm, vuint8m1_t vs2, size_t rs1, size_t vl); +vuint8m2_t 
__riscv_vror_vv_u8m2_m (vbool4_t vm, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl); +vuint8m2_t __riscv_vror_vx_u8m2_m (vbool4_t vm, vuint8m2_t vs2, size_t rs1, size_t vl); +vuint8m4_t __riscv_vror_vv_u8m4_m (vbool2_t vm, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl); +vuint8m4_t __riscv_vror_vx_u8m4_m (vbool2_t vm, vuint8m4_t vs2, size_t rs1, size_t vl); +vuint8m8_t __riscv_vror_vv_u8m8_m (vbool1_t vm, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl); +vuint8m8_t __riscv_vror_vx_u8m8_m (vbool1_t vm, vuint8m8_t vs2, size_t rs1, size_t vl); +vuint16mf4_t __riscv_vror_vv_u16mf4_m (vbool64_t vm, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl); +vuint16mf4_t __riscv_vror_vx_u16mf4_m (vbool64_t vm, vuint16mf4_t vs2, size_t rs1, size_t vl); +vuint16mf2_t __riscv_vror_vv_u16mf2_m (vbool32_t vm, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl); +vuint16mf2_t __riscv_vror_vx_u16mf2_m (vbool32_t vm, vuint16mf2_t vs2, size_t rs1, size_t vl); +vuint16m1_t __riscv_vror_vv_u16m1_m (vbool16_t vm, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl); +vuint16m1_t __riscv_vror_vx_u16m1_m (vbool16_t vm, vuint16m1_t vs2, size_t rs1, size_t vl); +vuint16m2_t __riscv_vror_vv_u16m2_m (vbool8_t vm, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl); +vuint16m2_t __riscv_vror_vx_u16m2_m (vbool8_t vm, vuint16m2_t vs2, size_t rs1, size_t vl); +vuint16m4_t __riscv_vror_vv_u16m4_m (vbool4_t vm, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl); +vuint16m4_t __riscv_vror_vx_u16m4_m (vbool4_t vm, vuint16m4_t vs2, size_t rs1, size_t vl); +vuint16m8_t __riscv_vror_vv_u16m8_m (vbool2_t vm, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl); +vuint16m8_t __riscv_vror_vx_u16m8_m (vbool2_t vm, vuint16m8_t vs2, size_t rs1, size_t vl); +vuint32mf2_t __riscv_vror_vv_u32mf2_m (vbool64_t vm, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl); +vuint32mf2_t __riscv_vror_vx_u32mf2_m (vbool64_t vm, vuint32mf2_t vs2, size_t rs1, size_t vl); +vuint32m1_t __riscv_vror_vv_u32m1_m (vbool32_t vm, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl); +vuint32m1_t __riscv_vror_vx_u32m1_m (vbool32_t vm, vuint32m1_t vs2, size_t rs1, size_t vl); +vuint32m2_t __riscv_vror_vv_u32m2_m (vbool16_t vm, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl); +vuint32m2_t __riscv_vror_vx_u32m2_m (vbool16_t vm, vuint32m2_t vs2, size_t rs1, size_t vl); +vuint32m4_t __riscv_vror_vv_u32m4_m (vbool8_t vm, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl); +vuint32m4_t __riscv_vror_vx_u32m4_m (vbool8_t vm, vuint32m4_t vs2, size_t rs1, size_t vl); +vuint32m8_t __riscv_vror_vv_u32m8_m (vbool4_t vm, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl); +vuint32m8_t __riscv_vror_vx_u32m8_m (vbool4_t vm, vuint32m8_t vs2, size_t rs1, size_t vl); +vuint64m1_t __riscv_vror_vv_u64m1_m (vbool64_t vm, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl); +vuint64m1_t __riscv_vror_vx_u64m1_m (vbool64_t vm, vuint64m1_t vs2, size_t rs1, size_t vl); +vuint64m2_t __riscv_vror_vv_u64m2_m (vbool32_t vm, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl); +vuint64m2_t __riscv_vror_vx_u64m2_m (vbool32_t vm, vuint64m2_t vs2, size_t rs1, size_t vl); +vuint64m4_t __riscv_vror_vv_u64m4_m (vbool16_t vm, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl); +vuint64m4_t __riscv_vror_vx_u64m4_m (vbool16_t vm, vuint64m4_t vs2, size_t rs1, size_t vl); +vuint64m8_t __riscv_vror_vv_u64m8_m (vbool8_t vm, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl); +vuint64m8_t __riscv_vror_vx_u64m8_m (vbool8_t vm, vuint64m8_t vs2, size_t rs1, size_t vl); +---- + +[[]] +==== Vector Basic Bit-manipulation used - Widening Shift + +[,c] +---- +vuint16mf4_t __riscv_vwsll_vv_u16mf4 (vuint8mf8_t vs2, 
vuint8mf8_t vs1, size_t vl); +vuint16mf4_t __riscv_vwsll_vx_u16mf4 (vuint8mf8_t vs2, size_t rs1, size_t vl); +vuint16mf2_t __riscv_vwsll_vv_u16mf2 (vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl); +vuint16mf2_t __riscv_vwsll_vx_u16mf2 (vuint8mf4_t vs2, size_t rs1, size_t vl); +vuint16m1_t __riscv_vwsll_vv_u16m1 (vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl); +vuint16m1_t __riscv_vwsll_vx_u16m1 (vuint8mf2_t vs2, size_t rs1, size_t vl); +vuint16m2_t __riscv_vwsll_vv_u16m2 (vuint8m1_t vs2, vuint8m1_t vs1, size_t vl); +vuint16m2_t __riscv_vwsll_vx_u16m2 (vuint8m1_t vs2, size_t rs1, size_t vl); +vuint16m4_t __riscv_vwsll_vv_u16m4 (vuint8m2_t vs2, vuint8m2_t vs1, size_t vl); +vuint16m4_t __riscv_vwsll_vx_u16m4 (vuint8m2_t vs2, size_t rs1, size_t vl); +vuint16m8_t __riscv_vwsll_vv_u16m8 (vuint8m4_t vs2, vuint8m4_t vs1, size_t vl); +vuint16m8_t __riscv_vwsll_vx_u16m8 (vuint8m4_t vs2, size_t rs1, size_t vl); +vuint32mf2_t __riscv_vwsll_vv_u32mf2 (vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl); +vuint32mf2_t __riscv_vwsll_vx_u32mf2 (vuint16mf4_t vs2, size_t rs1, size_t vl); +vuint32m1_t __riscv_vwsll_vv_u32m1 (vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl); +vuint32m1_t __riscv_vwsll_vx_u32m1 (vuint16mf2_t vs2, size_t rs1, size_t vl); +vuint32m2_t __riscv_vwsll_vv_u32m2 (vuint16m1_t vs2, vuint16m1_t vs1, size_t vl); +vuint32m2_t __riscv_vwsll_vx_u32m2 (vuint16m1_t vs2, size_t rs1, size_t vl); +vuint32m4_t __riscv_vwsll_vv_u32m4 (vuint16m2_t vs2, vuint16m2_t vs1, size_t vl); +vuint32m4_t __riscv_vwsll_vx_u32m4 (vuint16m2_t vs2, size_t rs1, size_t vl); +vuint32m8_t __riscv_vwsll_vv_u32m8 (vuint16m4_t vs2, vuint16m4_t vs1, size_t vl); +vuint32m8_t __riscv_vwsll_vx_u32m8 (vuint16m4_t vs2, size_t rs1, size_t vl); +vuint64m1_t __riscv_vwsll_vv_u64m1 (vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl); +vuint64m1_t __riscv_vwsll_vx_u64m1 (vuint32mf2_t vs2, size_t rs1, size_t vl); +vuint64m2_t __riscv_vwsll_vv_u64m2 (vuint32m1_t vs2, vuint32m1_t vs1, size_t vl); +vuint64m2_t __riscv_vwsll_vx_u64m2 (vuint32m1_t vs2, size_t rs1, size_t vl); +vuint64m4_t __riscv_vwsll_vv_u64m4 (vuint32m2_t vs2, vuint32m2_t vs1, size_t vl); +vuint64m4_t __riscv_vwsll_vx_u64m4 (vuint32m2_t vs2, size_t rs1, size_t vl); +vuint64m8_t __riscv_vwsll_vv_u64m8 (vuint32m4_t vs2, vuint32m4_t vs1, size_t vl); +vuint64m8_t __riscv_vwsll_vx_u64m8 (vuint32m4_t vs2, size_t rs1, size_t vl); +// masked functions +vuint16mf4_t __riscv_vwsll_vv_u16mf4_m (vbool64_t vm, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl); +vuint16mf4_t __riscv_vwsll_vx_u16mf4_m (vbool64_t vm, vuint8mf8_t vs2, size_t rs1, size_t vl); +vuint16mf2_t __riscv_vwsll_vv_u16mf2_m (vbool32_t vm, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl); +vuint16mf2_t __riscv_vwsll_vx_u16mf2_m (vbool32_t vm, vuint8mf4_t vs2, size_t rs1, size_t vl); +vuint16m1_t __riscv_vwsll_vv_u16m1_m (vbool16_t vm, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl); +vuint16m1_t __riscv_vwsll_vx_u16m1_m (vbool16_t vm, vuint8mf2_t vs2, size_t rs1, size_t vl); +vuint16m2_t __riscv_vwsll_vv_u16m2_m (vbool8_t vm, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl); +vuint16m2_t __riscv_vwsll_vx_u16m2_m (vbool8_t vm, vuint8m1_t vs2, size_t rs1, size_t vl); +vuint16m4_t __riscv_vwsll_vv_u16m4_m (vbool4_t vm, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl); +vuint16m4_t __riscv_vwsll_vx_u16m4_m (vbool4_t vm, vuint8m2_t vs2, size_t rs1, size_t vl); +vuint16m8_t __riscv_vwsll_vv_u16m8_m (vbool2_t vm, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl); +vuint16m8_t __riscv_vwsll_vx_u16m8_m (vbool2_t vm, vuint8m4_t vs2, size_t rs1, size_t vl); 
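// --- Illustrative usage sketch (not part of the generated listing) ---
// vwsll widens each source element to twice its width before shifting, so a
// u8 value shifted left by up to 8 fits in the u16 result without overflow.
// The helper below is hypothetical and assumes the base RVV intrinsics from
// <riscv_vector.h>.
#include <riscv_vector.h>
#include <stddef.h>
#include <stdint.h>

static void scale_u8_to_u16(uint16_t *dst, const uint8_t *src, size_t n, size_t shift) {
  for (size_t vl; n > 0; n -= vl, src += vl, dst += vl) {
    vl = __riscv_vsetvl_e8mf2(n);                          // SEW=8, LMUL=1/2 source
    vuint8mf2_t v = __riscv_vle8_v_u8mf2(src, vl);
    vuint16m1_t w = __riscv_vwsll_vx_u16m1(v, shift, vl);  // (uint16_t)v << shift
    __riscv_vse16_v_u16m1(dst, w, vl);
  }
}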
+vuint32mf2_t __riscv_vwsll_vv_u32mf2_m (vbool64_t vm, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl); +vuint32mf2_t __riscv_vwsll_vx_u32mf2_m (vbool64_t vm, vuint16mf4_t vs2, size_t rs1, size_t vl); +vuint32m1_t __riscv_vwsll_vv_u32m1_m (vbool32_t vm, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl); +vuint32m1_t __riscv_vwsll_vx_u32m1_m (vbool32_t vm, vuint16mf2_t vs2, size_t rs1, size_t vl); +vuint32m2_t __riscv_vwsll_vv_u32m2_m (vbool16_t vm, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl); +vuint32m2_t __riscv_vwsll_vx_u32m2_m (vbool16_t vm, vuint16m1_t vs2, size_t rs1, size_t vl); +vuint32m4_t __riscv_vwsll_vv_u32m4_m (vbool8_t vm, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl); +vuint32m4_t __riscv_vwsll_vx_u32m4_m (vbool8_t vm, vuint16m2_t vs2, size_t rs1, size_t vl); +vuint32m8_t __riscv_vwsll_vv_u32m8_m (vbool4_t vm, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl); +vuint32m8_t __riscv_vwsll_vx_u32m8_m (vbool4_t vm, vuint16m4_t vs2, size_t rs1, size_t vl); +vuint64m1_t __riscv_vwsll_vv_u64m1_m (vbool64_t vm, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl); +vuint64m1_t __riscv_vwsll_vx_u64m1_m (vbool64_t vm, vuint32mf2_t vs2, size_t rs1, size_t vl); +vuint64m2_t __riscv_vwsll_vv_u64m2_m (vbool32_t vm, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl); +vuint64m2_t __riscv_vwsll_vx_u64m2_m (vbool32_t vm, vuint32m1_t vs2, size_t rs1, size_t vl); +vuint64m4_t __riscv_vwsll_vv_u64m4_m (vbool16_t vm, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl); +vuint64m4_t __riscv_vwsll_vx_u64m4_m (vbool16_t vm, vuint32m2_t vs2, size_t rs1, size_t vl); +vuint64m8_t __riscv_vwsll_vv_u64m8_m (vbool8_t vm, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl); +vuint64m8_t __riscv_vwsll_vx_u64m8_m (vbool8_t vm, vuint32m4_t vs2, size_t rs1, size_t vl); +---- + +=== Zvbc - Vector Carryless Multiplication + +[[]] +==== Vector Carryless Multiplication + +[,c] +---- +vuint64m1_t __riscv_vclmul_vv_u64m1 (vuint64m1_t vs2, vuint64m1_t vs1, size_t vl); +vuint64m1_t __riscv_vclmul_vx_u64m1 (vuint64m1_t vs2, uint64_t rs1, size_t vl); +vuint64m2_t __riscv_vclmul_vv_u64m2 (vuint64m2_t vs2, vuint64m2_t vs1, size_t vl); +vuint64m2_t __riscv_vclmul_vx_u64m2 (vuint64m2_t vs2, uint64_t rs1, size_t vl); +vuint64m4_t __riscv_vclmul_vv_u64m4 (vuint64m4_t vs2, vuint64m4_t vs1, size_t vl); +vuint64m4_t __riscv_vclmul_vx_u64m4 (vuint64m4_t vs2, uint64_t rs1, size_t vl); +vuint64m8_t __riscv_vclmul_vv_u64m8 (vuint64m8_t vs2, vuint64m8_t vs1, size_t vl); +vuint64m8_t __riscv_vclmul_vx_u64m8 (vuint64m8_t vs2, uint64_t rs1, size_t vl); +vuint64m1_t __riscv_vclmulh_vv_u64m1 (vuint64m1_t vs2, vuint64m1_t vs1, size_t vl); +vuint64m1_t __riscv_vclmulh_vx_u64m1 (vuint64m1_t vs2, uint64_t rs1, size_t vl); +vuint64m2_t __riscv_vclmulh_vv_u64m2 (vuint64m2_t vs2, vuint64m2_t vs1, size_t vl); +vuint64m2_t __riscv_vclmulh_vx_u64m2 (vuint64m2_t vs2, uint64_t rs1, size_t vl); +vuint64m4_t __riscv_vclmulh_vv_u64m4 (vuint64m4_t vs2, vuint64m4_t vs1, size_t vl); +vuint64m4_t __riscv_vclmulh_vx_u64m4 (vuint64m4_t vs2, uint64_t rs1, size_t vl); +vuint64m8_t __riscv_vclmulh_vv_u64m8 (vuint64m8_t vs2, vuint64m8_t vs1, size_t vl); +vuint64m8_t __riscv_vclmulh_vx_u64m8 (vuint64m8_t vs2, uint64_t rs1, size_t vl); +// masked functions +vuint64m1_t __riscv_vclmul_vv_u64m1_m (vbool64_t vm, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl); +vuint64m1_t __riscv_vclmul_vx_u64m1_m (vbool64_t vm, vuint64m1_t vs2, uint64_t rs1, size_t vl); +vuint64m2_t __riscv_vclmul_vv_u64m2_m (vbool32_t vm, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl); +vuint64m2_t __riscv_vclmul_vx_u64m2_m (vbool32_t 
vm, vuint64m2_t vs2, uint64_t rs1, size_t vl); +vuint64m4_t __riscv_vclmul_vv_u64m4_m (vbool16_t vm, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl); +vuint64m4_t __riscv_vclmul_vx_u64m4_m (vbool16_t vm, vuint64m4_t vs2, uint64_t rs1, size_t vl); +vuint64m8_t __riscv_vclmul_vv_u64m8_m (vbool8_t vm, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl); +vuint64m8_t __riscv_vclmul_vx_u64m8_m (vbool8_t vm, vuint64m8_t vs2, uint64_t rs1, size_t vl); +vuint64m1_t __riscv_vclmulh_vv_u64m1_m (vbool64_t vm, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl); +vuint64m1_t __riscv_vclmulh_vx_u64m1_m (vbool64_t vm, vuint64m1_t vs2, uint64_t rs1, size_t vl); +vuint64m2_t __riscv_vclmulh_vv_u64m2_m (vbool32_t vm, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl); +vuint64m2_t __riscv_vclmulh_vx_u64m2_m (vbool32_t vm, vuint64m2_t vs2, uint64_t rs1, size_t vl); +vuint64m4_t __riscv_vclmulh_vv_u64m4_m (vbool16_t vm, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl); +vuint64m4_t __riscv_vclmulh_vx_u64m4_m (vbool16_t vm, vuint64m4_t vs2, uint64_t rs1, size_t vl); +vuint64m8_t __riscv_vclmulh_vv_u64m8_m (vbool8_t vm, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl); +vuint64m8_t __riscv_vclmulh_vx_u64m8_m (vbool8_t vm, vuint64m8_t vs2, uint64_t rs1, size_t vl); +---- + +=== Zvkg - Vector GCM/GMAC + +[[]] +==== Vector GCM/GMAC + +[,c] +---- +vuint32mf2_t __riscv_vghsh_vv_u32mf2 (vuint32mf2_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl); +vuint32m1_t __riscv_vghsh_vv_u32m1 (vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl); +vuint32m2_t __riscv_vghsh_vv_u32m2 (vuint32m2_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl); +vuint32m4_t __riscv_vghsh_vv_u32m4 (vuint32m4_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl); +vuint32m8_t __riscv_vghsh_vv_u32m8 (vuint32m8_t vd, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl); +vuint32mf2_t __riscv_vgmul_vv_u32mf2 (vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vgmul_vv_u32m1 (vuint32m1_t vd, vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vgmul_vv_u32m2 (vuint32m2_t vd, vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vgmul_vv_u32m4 (vuint32m4_t vd, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vgmul_vv_u32m8 (vuint32m8_t vd, vuint32m8_t vs2, size_t vl); +---- + +=== Zvkned - NIST Suite: Vector AES Block Cipher + +[[]] +==== Vector AES Encryption + +[,c] +---- +vuint32mf2_t __riscv_vaesef_vv_u32mf2 (vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); +vuint32mf2_t __riscv_vaesef_vs_u32mf2_u32mf2 (vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vaesef_vs_u32mf2_u32m1 (vuint32m1_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m2_t __riscv_vaesef_vs_u32mf2_u32m2 (vuint32m2_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m4_t __riscv_vaesef_vs_u32mf2_u32m4 (vuint32m4_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m8_t __riscv_vaesef_vs_u32mf2_u32m8 (vuint32m8_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vaesef_vv_u32m1 (vuint32m1_t vd, vuint32m1_t vs2, size_t vl); +vuint32m1_t __riscv_vaesef_vs_u32m1_u32m1 (vuint32m1_t vd, vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vaesef_vs_u32m1_u32m2 (vuint32m2_t vd, vuint32m1_t vs2, size_t vl); +vuint32m4_t __riscv_vaesef_vs_u32m1_u32m4 (vuint32m4_t vd, vuint32m1_t vs2, size_t vl); +vuint32m8_t __riscv_vaesef_vs_u32m1_u32m8 (vuint32m8_t vd, vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vaesef_vv_u32m2 (vuint32m2_t vd, vuint32m2_t vs2, size_t vl); +vuint32m2_t __riscv_vaesef_vs_u32m2_u32m2 (vuint32m2_t vd, vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vaesef_vs_u32m2_u32m4 (vuint32m4_t vd, 
vuint32m2_t vs2, size_t vl); +vuint32m8_t __riscv_vaesef_vs_u32m2_u32m8 (vuint32m8_t vd, vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vaesef_vv_u32m4 (vuint32m4_t vd, vuint32m4_t vs2, size_t vl); +vuint32m4_t __riscv_vaesef_vs_u32m4_u32m4 (vuint32m4_t vd, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vaesef_vs_u32m4_u32m8 (vuint32m8_t vd, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vaesef_vv_u32m8 (vuint32m8_t vd, vuint32m8_t vs2, size_t vl); +vuint32mf2_t __riscv_vaesem_vv_u32mf2 (vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); +vuint32mf2_t __riscv_vaesem_vs_u32mf2_u32mf2 (vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vaesem_vs_u32mf2_u32m1 (vuint32m1_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m2_t __riscv_vaesem_vs_u32mf2_u32m2 (vuint32m2_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m4_t __riscv_vaesem_vs_u32mf2_u32m4 (vuint32m4_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m8_t __riscv_vaesem_vs_u32mf2_u32m8 (vuint32m8_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vaesem_vv_u32m1 (vuint32m1_t vd, vuint32m1_t vs2, size_t vl); +vuint32m1_t __riscv_vaesem_vs_u32m1_u32m1 (vuint32m1_t vd, vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vaesem_vs_u32m1_u32m2 (vuint32m2_t vd, vuint32m1_t vs2, size_t vl); +vuint32m4_t __riscv_vaesem_vs_u32m1_u32m4 (vuint32m4_t vd, vuint32m1_t vs2, size_t vl); +vuint32m8_t __riscv_vaesem_vs_u32m1_u32m8 (vuint32m8_t vd, vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vaesem_vv_u32m2 (vuint32m2_t vd, vuint32m2_t vs2, size_t vl); +vuint32m2_t __riscv_vaesem_vs_u32m2_u32m2 (vuint32m2_t vd, vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vaesem_vs_u32m2_u32m4 (vuint32m4_t vd, vuint32m2_t vs2, size_t vl); +vuint32m8_t __riscv_vaesem_vs_u32m2_u32m8 (vuint32m8_t vd, vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vaesem_vv_u32m4 (vuint32m4_t vd, vuint32m4_t vs2, size_t vl); +vuint32m4_t __riscv_vaesem_vs_u32m4_u32m4 (vuint32m4_t vd, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vaesem_vs_u32m4_u32m8 (vuint32m8_t vd, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vaesem_vv_u32m8 (vuint32m8_t vd, vuint32m8_t vs2, size_t vl); +---- + +[[]] +==== Vector AES Decryption + +[,c] +---- +vuint32mf2_t __riscv_vaesdf_vv_u32mf2 (vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); +vuint32mf2_t __riscv_vaesdf_vs_u32mf2_u32mf2 (vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vaesdf_vs_u32mf2_u32m1 (vuint32m1_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m2_t __riscv_vaesdf_vs_u32mf2_u32m2 (vuint32m2_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m4_t __riscv_vaesdf_vs_u32mf2_u32m4 (vuint32m4_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m8_t __riscv_vaesdf_vs_u32mf2_u32m8 (vuint32m8_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vaesdf_vv_u32m1 (vuint32m1_t vd, vuint32m1_t vs2, size_t vl); +vuint32m1_t __riscv_vaesdf_vs_u32m1_u32m1 (vuint32m1_t vd, vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vaesdf_vs_u32m1_u32m2 (vuint32m2_t vd, vuint32m1_t vs2, size_t vl); +vuint32m4_t __riscv_vaesdf_vs_u32m1_u32m4 (vuint32m4_t vd, vuint32m1_t vs2, size_t vl); +vuint32m8_t __riscv_vaesdf_vs_u32m1_u32m8 (vuint32m8_t vd, vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vaesdf_vv_u32m2 (vuint32m2_t vd, vuint32m2_t vs2, size_t vl); +vuint32m2_t __riscv_vaesdf_vs_u32m2_u32m2 (vuint32m2_t vd, vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vaesdf_vs_u32m2_u32m4 (vuint32m4_t vd, vuint32m2_t vs2, size_t vl); +vuint32m8_t __riscv_vaesdf_vs_u32m2_u32m8 (vuint32m8_t vd, vuint32m2_t vs2, size_t vl); +vuint32m4_t 
__riscv_vaesdf_vv_u32m4 (vuint32m4_t vd, vuint32m4_t vs2, size_t vl); +vuint32m4_t __riscv_vaesdf_vs_u32m4_u32m4 (vuint32m4_t vd, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vaesdf_vs_u32m4_u32m8 (vuint32m8_t vd, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vaesdf_vv_u32m8 (vuint32m8_t vd, vuint32m8_t vs2, size_t vl); +vuint32mf2_t __riscv_vaesdm_vv_u32mf2 (vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); +vuint32mf2_t __riscv_vaesdm_vs_u32mf2_u32mf2 (vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vaesdm_vs_u32mf2_u32m1 (vuint32m1_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m2_t __riscv_vaesdm_vs_u32mf2_u32m2 (vuint32m2_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m4_t __riscv_vaesdm_vs_u32mf2_u32m4 (vuint32m4_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m8_t __riscv_vaesdm_vs_u32mf2_u32m8 (vuint32m8_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vaesdm_vv_u32m1 (vuint32m1_t vd, vuint32m1_t vs2, size_t vl); +vuint32m1_t __riscv_vaesdm_vs_u32m1_u32m1 (vuint32m1_t vd, vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vaesdm_vs_u32m1_u32m2 (vuint32m2_t vd, vuint32m1_t vs2, size_t vl); +vuint32m4_t __riscv_vaesdm_vs_u32m1_u32m4 (vuint32m4_t vd, vuint32m1_t vs2, size_t vl); +vuint32m8_t __riscv_vaesdm_vs_u32m1_u32m8 (vuint32m8_t vd, vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vaesdm_vv_u32m2 (vuint32m2_t vd, vuint32m2_t vs2, size_t vl); +vuint32m2_t __riscv_vaesdm_vs_u32m2_u32m2 (vuint32m2_t vd, vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vaesdm_vs_u32m2_u32m4 (vuint32m4_t vd, vuint32m2_t vs2, size_t vl); +vuint32m8_t __riscv_vaesdm_vs_u32m2_u32m8 (vuint32m8_t vd, vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vaesdm_vv_u32m4 (vuint32m4_t vd, vuint32m4_t vs2, size_t vl); +vuint32m4_t __riscv_vaesdm_vs_u32m4_u32m4 (vuint32m4_t vd, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vaesdm_vs_u32m4_u32m8 (vuint32m8_t vd, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vaesdm_vv_u32m8 (vuint32m8_t vd, vuint32m8_t vs2, size_t vl); +---- + +[[]] +==== Vector AES-128 Forward KeySchedule generation + +[,c] +---- +vuint32mf2_t __riscv_vaeskf1_vi_u32mf2 (vuint32mf2_t vs2, size_t uimm, size_t vl); +vuint32m1_t __riscv_vaeskf1_vi_u32m1 (vuint32m1_t vs2, size_t uimm, size_t vl); +vuint32m2_t __riscv_vaeskf1_vi_u32m2 (vuint32m2_t vs2, size_t uimm, size_t vl); +vuint32m4_t __riscv_vaeskf1_vi_u32m4 (vuint32m4_t vs2, size_t uimm, size_t vl); +vuint32m8_t __riscv_vaeskf1_vi_u32m8 (vuint32m8_t vs2, size_t uimm, size_t vl); +vuint32mf2_t __riscv_vaeskf2_vi_u32mf2 (vuint32mf2_t vd, vuint32mf2_t vs2, size_t uimm, size_t vl); +vuint32m1_t __riscv_vaeskf2_vi_u32m1 (vuint32m1_t vd, vuint32m1_t vs2, size_t uimm, size_t vl); +vuint32m2_t __riscv_vaeskf2_vi_u32m2 (vuint32m2_t vd, vuint32m2_t vs2, size_t uimm, size_t vl); +vuint32m4_t __riscv_vaeskf2_vi_u32m4 (vuint32m4_t vd, vuint32m4_t vs2, size_t uimm, size_t vl); +vuint32m8_t __riscv_vaeskf2_vi_u32m8 (vuint32m8_t vd, vuint32m8_t vs2, size_t uimm, size_t vl); +---- + +[[]] +==== Vector AES round zero + +[,c] +---- +vuint32mf2_t __riscv_vaesz_vs_u32mf2_u32mf2 (vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vaesz_vs_u32mf2_u32m1 (vuint32m1_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m2_t __riscv_vaesz_vs_u32mf2_u32m2 (vuint32m2_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m4_t __riscv_vaesz_vs_u32mf2_u32m4 (vuint32m4_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m8_t __riscv_vaesz_vs_u32mf2_u32m8 (vuint32m8_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vaesz_vs_u32m1_u32m1 (vuint32m1_t vd, 
vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vaesz_vs_u32m1_u32m2 (vuint32m2_t vd, vuint32m1_t vs2, size_t vl); +vuint32m4_t __riscv_vaesz_vs_u32m1_u32m4 (vuint32m4_t vd, vuint32m1_t vs2, size_t vl); +vuint32m8_t __riscv_vaesz_vs_u32m1_u32m8 (vuint32m8_t vd, vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vaesz_vs_u32m2_u32m2 (vuint32m2_t vd, vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vaesz_vs_u32m2_u32m4 (vuint32m4_t vd, vuint32m2_t vs2, size_t vl); +vuint32m8_t __riscv_vaesz_vs_u32m2_u32m8 (vuint32m8_t vd, vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vaesz_vs_u32m4_u32m4 (vuint32m4_t vd, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vaesz_vs_u32m4_u32m8 (vuint32m8_t vd, vuint32m4_t vs2, size_t vl); +---- + +=== Zvknh - NIST Suite: Vector SHA-2 Secure Hash + +[[]] +==== Vector SHA-2 message schedule + +[,c] +---- +vuint32mf2_t __riscv_vsha2ms_vv_u32mf2 (vuint32mf2_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl); +vuint32m1_t __riscv_vsha2ms_vv_u32m1 (vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl); +vuint32m2_t __riscv_vsha2ms_vv_u32m2 (vuint32m2_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl); +vuint32m4_t __riscv_vsha2ms_vv_u32m4 (vuint32m4_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl); +vuint32m8_t __riscv_vsha2ms_vv_u32m8 (vuint32m8_t vd, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl); +vuint64m1_t __riscv_vsha2ms_vv_u64m1 (vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl); +vuint64m2_t __riscv_vsha2ms_vv_u64m2 (vuint64m2_t vd, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl); +vuint64m4_t __riscv_vsha2ms_vv_u64m4 (vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl); +vuint64m8_t __riscv_vsha2ms_vv_u64m8 (vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl); +---- + +[[]] +==== Vector SHA-2 two rounds of compression + +[,c] +---- +vuint32mf2_t __riscv_vsha2ch_vv_u32mf2 (vuint32mf2_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl); +vuint32m1_t __riscv_vsha2ch_vv_u32m1 (vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl); +vuint32m2_t __riscv_vsha2ch_vv_u32m2 (vuint32m2_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl); +vuint32m4_t __riscv_vsha2ch_vv_u32m4 (vuint32m4_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl); +vuint32m8_t __riscv_vsha2ch_vv_u32m8 (vuint32m8_t vd, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl); +vuint64m1_t __riscv_vsha2ch_vv_u64m1 (vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl); +vuint64m2_t __riscv_vsha2ch_vv_u64m2 (vuint64m2_t vd, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl); +vuint64m4_t __riscv_vsha2ch_vv_u64m4 (vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl); +vuint64m8_t __riscv_vsha2ch_vv_u64m8 (vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl); +vuint32mf2_t __riscv_vsha2cl_vv_u32mf2 (vuint32mf2_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl); +vuint32m1_t __riscv_vsha2cl_vv_u32m1 (vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl); +vuint32m2_t __riscv_vsha2cl_vv_u32m2 (vuint32m2_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl); +vuint32m4_t __riscv_vsha2cl_vv_u32m4 (vuint32m4_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl); +vuint32m8_t __riscv_vsha2cl_vv_u32m8 (vuint32m8_t vd, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl); +vuint64m1_t __riscv_vsha2cl_vv_u64m1 (vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl); +vuint64m2_t __riscv_vsha2cl_vv_u64m2 (vuint64m2_t vd, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl); +vuint64m4_t __riscv_vsha2cl_vv_u64m4 (vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t vs1, 
size_t vl); +vuint64m8_t __riscv_vsha2cl_vv_u64m8 (vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl); +---- + +=== Zvksed - ShangMi Suite: SM4 Block Cipher + +[[]] +==== Vector SM4 KeyExpansion + +[,c] +---- +vuint32mf2_t __riscv_vsm4k_vi_u32mf2 (vuint32mf2_t vs2, size_t uimm, size_t vl); +vuint32m1_t __riscv_vsm4k_vi_u32m1 (vuint32m1_t vs2, size_t uimm, size_t vl); +vuint32m2_t __riscv_vsm4k_vi_u32m2 (vuint32m2_t vs2, size_t uimm, size_t vl); +vuint32m4_t __riscv_vsm4k_vi_u32m4 (vuint32m4_t vs2, size_t uimm, size_t vl); +vuint32m8_t __riscv_vsm4k_vi_u32m8 (vuint32m8_t vs2, size_t uimm, size_t vl); +---- + +[[]] +==== Vector SM4 Rounds + +[,c] +---- +vuint32mf2_t __riscv_vsm4r_vv_u32mf2 (vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); +vuint32mf2_t __riscv_vsm4r_vs_u32mf2_u32mf2 (vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vsm4r_vs_u32mf2_u32m1 (vuint32m1_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m2_t __riscv_vsm4r_vs_u32mf2_u32m2 (vuint32m2_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m4_t __riscv_vsm4r_vs_u32mf2_u32m4 (vuint32m4_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m8_t __riscv_vsm4r_vs_u32mf2_u32m8 (vuint32m8_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vsm4r_vv_u32m1 (vuint32m1_t vd, vuint32m1_t vs2, size_t vl); +vuint32m1_t __riscv_vsm4r_vs_u32m1_u32m1 (vuint32m1_t vd, vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vsm4r_vs_u32m1_u32m2 (vuint32m2_t vd, vuint32m1_t vs2, size_t vl); +vuint32m4_t __riscv_vsm4r_vs_u32m1_u32m4 (vuint32m4_t vd, vuint32m1_t vs2, size_t vl); +vuint32m8_t __riscv_vsm4r_vs_u32m1_u32m8 (vuint32m8_t vd, vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vsm4r_vv_u32m2 (vuint32m2_t vd, vuint32m2_t vs2, size_t vl); +vuint32m2_t __riscv_vsm4r_vs_u32m2_u32m2 (vuint32m2_t vd, vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vsm4r_vs_u32m2_u32m4 (vuint32m4_t vd, vuint32m2_t vs2, size_t vl); +vuint32m8_t __riscv_vsm4r_vs_u32m2_u32m8 (vuint32m8_t vd, vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vsm4r_vv_u32m4 (vuint32m4_t vd, vuint32m4_t vs2, size_t vl); +vuint32m4_t __riscv_vsm4r_vs_u32m4_u32m4 (vuint32m4_t vd, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vsm4r_vs_u32m4_u32m8 (vuint32m8_t vd, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vsm4r_vv_u32m8 (vuint32m8_t vd, vuint32m8_t vs2, size_t vl); +---- + +=== Zvksh - ShangMi Suite: SM3 Secure Hash + +[[]] +==== Vector SM3 Message Expansion + +[,c] +---- +vuint32mf2_t __riscv_vsm3me_vv_u32mf2 (vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl); +vuint32m1_t __riscv_vsm3me_vv_u32m1 (vuint32m1_t vs2, vuint32m1_t vs1, size_t vl); +vuint32m2_t __riscv_vsm3me_vv_u32m2 (vuint32m2_t vs2, vuint32m2_t vs1, size_t vl); +vuint32m4_t __riscv_vsm3me_vv_u32m4 (vuint32m4_t vs2, vuint32m4_t vs1, size_t vl); +vuint32m8_t __riscv_vsm3me_vv_u32m8 (vuint32m8_t vs2, vuint32m8_t vs1, size_t vl); +---- + +[[]] +==== Vector SM3 Compression + +[,c] +---- +vuint32mf2_t __riscv_vsm3c_vi_u32mf2 (vuint32mf2_t vd, vuint32mf2_t vs2, size_t uimm, size_t vl); +vuint32m1_t __riscv_vsm3c_vi_u32m1 (vuint32m1_t vd, vuint32m1_t vs2, size_t uimm, size_t vl); +vuint32m2_t __riscv_vsm3c_vi_u32m2 (vuint32m2_t vd, vuint32m2_t vs2, size_t uimm, size_t vl); +vuint32m4_t __riscv_vsm3c_vi_u32m4 (vuint32m4_t vd, vuint32m4_t vs2, size_t uimm, size_t vl); +vuint32m8_t __riscv_vsm3c_vi_u32m8 (vuint32m8_t vd, vuint32m8_t vs2, size_t uimm, size_t vl); +---- diff --git a/auto-generated/vector-crypto/intrinsic_funcs/00_zvbb_-_vector_bit-manipulation_used_in_cryptography.adoc 
b/auto-generated/vector-crypto/intrinsic_funcs/00_zvbb_-_vector_bit-manipulation_used_in_cryptography.adoc new file mode 100644 index 000000000..be1bbf32e --- /dev/null +++ b/auto-generated/vector-crypto/intrinsic_funcs/00_zvbb_-_vector_bit-manipulation_used_in_cryptography.adoc @@ -0,0 +1,638 @@ + +=== Zvbb - Vector Bit-manipulation used in Cryptography + +[[]] +==== Vector Bit-manipulation used in Cryptography - Bitwise And-Not + +[,c] +---- +vuint8mf8_t __riscv_vandn_vv_u8mf8 (vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl); +vuint8mf8_t __riscv_vandn_vx_u8mf8 (vuint8mf8_t vs2, uint8_t rs1, size_t vl); +vuint8mf4_t __riscv_vandn_vv_u8mf4 (vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl); +vuint8mf4_t __riscv_vandn_vx_u8mf4 (vuint8mf4_t vs2, uint8_t rs1, size_t vl); +vuint8mf2_t __riscv_vandn_vv_u8mf2 (vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl); +vuint8mf2_t __riscv_vandn_vx_u8mf2 (vuint8mf2_t vs2, uint8_t rs1, size_t vl); +vuint8m1_t __riscv_vandn_vv_u8m1 (vuint8m1_t vs2, vuint8m1_t vs1, size_t vl); +vuint8m1_t __riscv_vandn_vx_u8m1 (vuint8m1_t vs2, uint8_t rs1, size_t vl); +vuint8m2_t __riscv_vandn_vv_u8m2 (vuint8m2_t vs2, vuint8m2_t vs1, size_t vl); +vuint8m2_t __riscv_vandn_vx_u8m2 (vuint8m2_t vs2, uint8_t rs1, size_t vl); +vuint8m4_t __riscv_vandn_vv_u8m4 (vuint8m4_t vs2, vuint8m4_t vs1, size_t vl); +vuint8m4_t __riscv_vandn_vx_u8m4 (vuint8m4_t vs2, uint8_t rs1, size_t vl); +vuint8m8_t __riscv_vandn_vv_u8m8 (vuint8m8_t vs2, vuint8m8_t vs1, size_t vl); +vuint8m8_t __riscv_vandn_vx_u8m8 (vuint8m8_t vs2, uint8_t rs1, size_t vl); +vuint16mf4_t __riscv_vandn_vv_u16mf4 (vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl); +vuint16mf4_t __riscv_vandn_vx_u16mf4 (vuint16mf4_t vs2, uint16_t rs1, size_t vl); +vuint16mf2_t __riscv_vandn_vv_u16mf2 (vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl); +vuint16mf2_t __riscv_vandn_vx_u16mf2 (vuint16mf2_t vs2, uint16_t rs1, size_t vl); +vuint16m1_t __riscv_vandn_vv_u16m1 (vuint16m1_t vs2, vuint16m1_t vs1, size_t vl); +vuint16m1_t __riscv_vandn_vx_u16m1 (vuint16m1_t vs2, uint16_t rs1, size_t vl); +vuint16m2_t __riscv_vandn_vv_u16m2 (vuint16m2_t vs2, vuint16m2_t vs1, size_t vl); +vuint16m2_t __riscv_vandn_vx_u16m2 (vuint16m2_t vs2, uint16_t rs1, size_t vl); +vuint16m4_t __riscv_vandn_vv_u16m4 (vuint16m4_t vs2, vuint16m4_t vs1, size_t vl); +vuint16m4_t __riscv_vandn_vx_u16m4 (vuint16m4_t vs2, uint16_t rs1, size_t vl); +vuint16m8_t __riscv_vandn_vv_u16m8 (vuint16m8_t vs2, vuint16m8_t vs1, size_t vl); +vuint16m8_t __riscv_vandn_vx_u16m8 (vuint16m8_t vs2, uint16_t rs1, size_t vl); +vuint32mf2_t __riscv_vandn_vv_u32mf2 (vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl); +vuint32mf2_t __riscv_vandn_vx_u32mf2 (vuint32mf2_t vs2, uint32_t rs1, size_t vl); +vuint32m1_t __riscv_vandn_vv_u32m1 (vuint32m1_t vs2, vuint32m1_t vs1, size_t vl); +vuint32m1_t __riscv_vandn_vx_u32m1 (vuint32m1_t vs2, uint32_t rs1, size_t vl); +vuint32m2_t __riscv_vandn_vv_u32m2 (vuint32m2_t vs2, vuint32m2_t vs1, size_t vl); +vuint32m2_t __riscv_vandn_vx_u32m2 (vuint32m2_t vs2, uint32_t rs1, size_t vl); +vuint32m4_t __riscv_vandn_vv_u32m4 (vuint32m4_t vs2, vuint32m4_t vs1, size_t vl); +vuint32m4_t __riscv_vandn_vx_u32m4 (vuint32m4_t vs2, uint32_t rs1, size_t vl); +vuint32m8_t __riscv_vandn_vv_u32m8 (vuint32m8_t vs2, vuint32m8_t vs1, size_t vl); +vuint32m8_t __riscv_vandn_vx_u32m8 (vuint32m8_t vs2, uint32_t rs1, size_t vl); +vuint64m1_t __riscv_vandn_vv_u64m1 (vuint64m1_t vs2, vuint64m1_t vs1, size_t vl); +vuint64m1_t __riscv_vandn_vx_u64m1 (vuint64m1_t vs2, uint64_t rs1, size_t vl); +vuint64m2_t 
__riscv_vandn_vv_u64m2 (vuint64m2_t vs2, vuint64m2_t vs1, size_t vl); +vuint64m2_t __riscv_vandn_vx_u64m2 (vuint64m2_t vs2, uint64_t rs1, size_t vl); +vuint64m4_t __riscv_vandn_vv_u64m4 (vuint64m4_t vs2, vuint64m4_t vs1, size_t vl); +vuint64m4_t __riscv_vandn_vx_u64m4 (vuint64m4_t vs2, uint64_t rs1, size_t vl); +vuint64m8_t __riscv_vandn_vv_u64m8 (vuint64m8_t vs2, vuint64m8_t vs1, size_t vl); +vuint64m8_t __riscv_vandn_vx_u64m8 (vuint64m8_t vs2, uint64_t rs1, size_t vl); +// masked functions +vuint8mf8_t __riscv_vandn_vv_u8mf8_m (vbool64_t vm, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl); +vuint8mf8_t __riscv_vandn_vx_u8mf8_m (vbool64_t vm, vuint8mf8_t vs2, uint8_t rs1, size_t vl); +vuint8mf4_t __riscv_vandn_vv_u8mf4_m (vbool32_t vm, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl); +vuint8mf4_t __riscv_vandn_vx_u8mf4_m (vbool32_t vm, vuint8mf4_t vs2, uint8_t rs1, size_t vl); +vuint8mf2_t __riscv_vandn_vv_u8mf2_m (vbool16_t vm, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl); +vuint8mf2_t __riscv_vandn_vx_u8mf2_m (vbool16_t vm, vuint8mf2_t vs2, uint8_t rs1, size_t vl); +vuint8m1_t __riscv_vandn_vv_u8m1_m (vbool8_t vm, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl); +vuint8m1_t __riscv_vandn_vx_u8m1_m (vbool8_t vm, vuint8m1_t vs2, uint8_t rs1, size_t vl); +vuint8m2_t __riscv_vandn_vv_u8m2_m (vbool4_t vm, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl); +vuint8m2_t __riscv_vandn_vx_u8m2_m (vbool4_t vm, vuint8m2_t vs2, uint8_t rs1, size_t vl); +vuint8m4_t __riscv_vandn_vv_u8m4_m (vbool2_t vm, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl); +vuint8m4_t __riscv_vandn_vx_u8m4_m (vbool2_t vm, vuint8m4_t vs2, uint8_t rs1, size_t vl); +vuint8m8_t __riscv_vandn_vv_u8m8_m (vbool1_t vm, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl); +vuint8m8_t __riscv_vandn_vx_u8m8_m (vbool1_t vm, vuint8m8_t vs2, uint8_t rs1, size_t vl); +vuint16mf4_t __riscv_vandn_vv_u16mf4_m (vbool64_t vm, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl); +vuint16mf4_t __riscv_vandn_vx_u16mf4_m (vbool64_t vm, vuint16mf4_t vs2, uint16_t rs1, size_t vl); +vuint16mf2_t __riscv_vandn_vv_u16mf2_m (vbool32_t vm, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl); +vuint16mf2_t __riscv_vandn_vx_u16mf2_m (vbool32_t vm, vuint16mf2_t vs2, uint16_t rs1, size_t vl); +vuint16m1_t __riscv_vandn_vv_u16m1_m (vbool16_t vm, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl); +vuint16m1_t __riscv_vandn_vx_u16m1_m (vbool16_t vm, vuint16m1_t vs2, uint16_t rs1, size_t vl); +vuint16m2_t __riscv_vandn_vv_u16m2_m (vbool8_t vm, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl); +vuint16m2_t __riscv_vandn_vx_u16m2_m (vbool8_t vm, vuint16m2_t vs2, uint16_t rs1, size_t vl); +vuint16m4_t __riscv_vandn_vv_u16m4_m (vbool4_t vm, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl); +vuint16m4_t __riscv_vandn_vx_u16m4_m (vbool4_t vm, vuint16m4_t vs2, uint16_t rs1, size_t vl); +vuint16m8_t __riscv_vandn_vv_u16m8_m (vbool2_t vm, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl); +vuint16m8_t __riscv_vandn_vx_u16m8_m (vbool2_t vm, vuint16m8_t vs2, uint16_t rs1, size_t vl); +vuint32mf2_t __riscv_vandn_vv_u32mf2_m (vbool64_t vm, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl); +vuint32mf2_t __riscv_vandn_vx_u32mf2_m (vbool64_t vm, vuint32mf2_t vs2, uint32_t rs1, size_t vl); +vuint32m1_t __riscv_vandn_vv_u32m1_m (vbool32_t vm, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl); +vuint32m1_t __riscv_vandn_vx_u32m1_m (vbool32_t vm, vuint32m1_t vs2, uint32_t rs1, size_t vl); +vuint32m2_t __riscv_vandn_vv_u32m2_m (vbool16_t vm, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl); +vuint32m2_t __riscv_vandn_vx_u32m2_m (vbool16_t vm, 
vuint32m2_t vs2, uint32_t rs1, size_t vl); +vuint32m4_t __riscv_vandn_vv_u32m4_m (vbool8_t vm, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl); +vuint32m4_t __riscv_vandn_vx_u32m4_m (vbool8_t vm, vuint32m4_t vs2, uint32_t rs1, size_t vl); +vuint32m8_t __riscv_vandn_vv_u32m8_m (vbool4_t vm, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl); +vuint32m8_t __riscv_vandn_vx_u32m8_m (vbool4_t vm, vuint32m8_t vs2, uint32_t rs1, size_t vl); +vuint64m1_t __riscv_vandn_vv_u64m1_m (vbool64_t vm, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl); +vuint64m1_t __riscv_vandn_vx_u64m1_m (vbool64_t vm, vuint64m1_t vs2, uint64_t rs1, size_t vl); +vuint64m2_t __riscv_vandn_vv_u64m2_m (vbool32_t vm, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl); +vuint64m2_t __riscv_vandn_vx_u64m2_m (vbool32_t vm, vuint64m2_t vs2, uint64_t rs1, size_t vl); +vuint64m4_t __riscv_vandn_vv_u64m4_m (vbool16_t vm, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl); +vuint64m4_t __riscv_vandn_vx_u64m4_m (vbool16_t vm, vuint64m4_t vs2, uint64_t rs1, size_t vl); +vuint64m8_t __riscv_vandn_vv_u64m8_m (vbool8_t vm, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl); +vuint64m8_t __riscv_vandn_vx_u64m8_m (vbool8_t vm, vuint64m8_t vs2, uint64_t rs1, size_t vl); +---- + +[[]] +==== Vector Basic Bit-manipulation - Reverse + +[,c] +---- +vuint8mf8_t __riscv_vbrev_v_u8mf8 (vuint8mf8_t vs2, size_t vl); +vuint8mf4_t __riscv_vbrev_v_u8mf4 (vuint8mf4_t vs2, size_t vl); +vuint8mf2_t __riscv_vbrev_v_u8mf2 (vuint8mf2_t vs2, size_t vl); +vuint8m1_t __riscv_vbrev_v_u8m1 (vuint8m1_t vs2, size_t vl); +vuint8m2_t __riscv_vbrev_v_u8m2 (vuint8m2_t vs2, size_t vl); +vuint8m4_t __riscv_vbrev_v_u8m4 (vuint8m4_t vs2, size_t vl); +vuint8m8_t __riscv_vbrev_v_u8m8 (vuint8m8_t vs2, size_t vl); +vuint16mf4_t __riscv_vbrev_v_u16mf4 (vuint16mf4_t vs2, size_t vl); +vuint16mf2_t __riscv_vbrev_v_u16mf2 (vuint16mf2_t vs2, size_t vl); +vuint16m1_t __riscv_vbrev_v_u16m1 (vuint16m1_t vs2, size_t vl); +vuint16m2_t __riscv_vbrev_v_u16m2 (vuint16m2_t vs2, size_t vl); +vuint16m4_t __riscv_vbrev_v_u16m4 (vuint16m4_t vs2, size_t vl); +vuint16m8_t __riscv_vbrev_v_u16m8 (vuint16m8_t vs2, size_t vl); +vuint32mf2_t __riscv_vbrev_v_u32mf2 (vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vbrev_v_u32m1 (vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vbrev_v_u32m2 (vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vbrev_v_u32m4 (vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vbrev_v_u32m8 (vuint32m8_t vs2, size_t vl); +vuint64m1_t __riscv_vbrev_v_u64m1 (vuint64m1_t vs2, size_t vl); +vuint64m2_t __riscv_vbrev_v_u64m2 (vuint64m2_t vs2, size_t vl); +vuint64m4_t __riscv_vbrev_v_u64m4 (vuint64m4_t vs2, size_t vl); +vuint64m8_t __riscv_vbrev_v_u64m8 (vuint64m8_t vs2, size_t vl); +vuint8mf8_t __riscv_vbrev8_v_u8mf8 (vuint8mf8_t vs2, size_t vl); +vuint8mf4_t __riscv_vbrev8_v_u8mf4 (vuint8mf4_t vs2, size_t vl); +vuint8mf2_t __riscv_vbrev8_v_u8mf2 (vuint8mf2_t vs2, size_t vl); +vuint8m1_t __riscv_vbrev8_v_u8m1 (vuint8m1_t vs2, size_t vl); +vuint8m2_t __riscv_vbrev8_v_u8m2 (vuint8m2_t vs2, size_t vl); +vuint8m4_t __riscv_vbrev8_v_u8m4 (vuint8m4_t vs2, size_t vl); +vuint8m8_t __riscv_vbrev8_v_u8m8 (vuint8m8_t vs2, size_t vl); +vuint16mf4_t __riscv_vbrev8_v_u16mf4 (vuint16mf4_t vs2, size_t vl); +vuint16mf2_t __riscv_vbrev8_v_u16mf2 (vuint16mf2_t vs2, size_t vl); +vuint16m1_t __riscv_vbrev8_v_u16m1 (vuint16m1_t vs2, size_t vl); +vuint16m2_t __riscv_vbrev8_v_u16m2 (vuint16m2_t vs2, size_t vl); +vuint16m4_t __riscv_vbrev8_v_u16m4 (vuint16m4_t vs2, size_t vl); +vuint16m8_t __riscv_vbrev8_v_u16m8 (vuint16m8_t vs2, size_t 
vl); +vuint32mf2_t __riscv_vbrev8_v_u32mf2 (vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vbrev8_v_u32m1 (vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vbrev8_v_u32m2 (vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vbrev8_v_u32m4 (vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vbrev8_v_u32m8 (vuint32m8_t vs2, size_t vl); +vuint64m1_t __riscv_vbrev8_v_u64m1 (vuint64m1_t vs2, size_t vl); +vuint64m2_t __riscv_vbrev8_v_u64m2 (vuint64m2_t vs2, size_t vl); +vuint64m4_t __riscv_vbrev8_v_u64m4 (vuint64m4_t vs2, size_t vl); +vuint64m8_t __riscv_vbrev8_v_u64m8 (vuint64m8_t vs2, size_t vl); +vuint8mf8_t __riscv_vrev8_v_u8mf8 (vuint8mf8_t vs2, size_t vl); +vuint8mf4_t __riscv_vrev8_v_u8mf4 (vuint8mf4_t vs2, size_t vl); +vuint8mf2_t __riscv_vrev8_v_u8mf2 (vuint8mf2_t vs2, size_t vl); +vuint8m1_t __riscv_vrev8_v_u8m1 (vuint8m1_t vs2, size_t vl); +vuint8m2_t __riscv_vrev8_v_u8m2 (vuint8m2_t vs2, size_t vl); +vuint8m4_t __riscv_vrev8_v_u8m4 (vuint8m4_t vs2, size_t vl); +vuint8m8_t __riscv_vrev8_v_u8m8 (vuint8m8_t vs2, size_t vl); +vuint16mf4_t __riscv_vrev8_v_u16mf4 (vuint16mf4_t vs2, size_t vl); +vuint16mf2_t __riscv_vrev8_v_u16mf2 (vuint16mf2_t vs2, size_t vl); +vuint16m1_t __riscv_vrev8_v_u16m1 (vuint16m1_t vs2, size_t vl); +vuint16m2_t __riscv_vrev8_v_u16m2 (vuint16m2_t vs2, size_t vl); +vuint16m4_t __riscv_vrev8_v_u16m4 (vuint16m4_t vs2, size_t vl); +vuint16m8_t __riscv_vrev8_v_u16m8 (vuint16m8_t vs2, size_t vl); +vuint32mf2_t __riscv_vrev8_v_u32mf2 (vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vrev8_v_u32m1 (vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vrev8_v_u32m2 (vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vrev8_v_u32m4 (vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vrev8_v_u32m8 (vuint32m8_t vs2, size_t vl); +vuint64m1_t __riscv_vrev8_v_u64m1 (vuint64m1_t vs2, size_t vl); +vuint64m2_t __riscv_vrev8_v_u64m2 (vuint64m2_t vs2, size_t vl); +vuint64m4_t __riscv_vrev8_v_u64m4 (vuint64m4_t vs2, size_t vl); +vuint64m8_t __riscv_vrev8_v_u64m8 (vuint64m8_t vs2, size_t vl); +// masked functions +vuint8mf8_t __riscv_vbrev_v_u8mf8_m (vbool64_t vm, vuint8mf8_t vs2, size_t vl); +vuint8mf4_t __riscv_vbrev_v_u8mf4_m (vbool32_t vm, vuint8mf4_t vs2, size_t vl); +vuint8mf2_t __riscv_vbrev_v_u8mf2_m (vbool16_t vm, vuint8mf2_t vs2, size_t vl); +vuint8m1_t __riscv_vbrev_v_u8m1_m (vbool8_t vm, vuint8m1_t vs2, size_t vl); +vuint8m2_t __riscv_vbrev_v_u8m2_m (vbool4_t vm, vuint8m2_t vs2, size_t vl); +vuint8m4_t __riscv_vbrev_v_u8m4_m (vbool2_t vm, vuint8m4_t vs2, size_t vl); +vuint8m8_t __riscv_vbrev_v_u8m8_m (vbool1_t vm, vuint8m8_t vs2, size_t vl); +vuint16mf4_t __riscv_vbrev_v_u16mf4_m (vbool64_t vm, vuint16mf4_t vs2, size_t vl); +vuint16mf2_t __riscv_vbrev_v_u16mf2_m (vbool32_t vm, vuint16mf2_t vs2, size_t vl); +vuint16m1_t __riscv_vbrev_v_u16m1_m (vbool16_t vm, vuint16m1_t vs2, size_t vl); +vuint16m2_t __riscv_vbrev_v_u16m2_m (vbool8_t vm, vuint16m2_t vs2, size_t vl); +vuint16m4_t __riscv_vbrev_v_u16m4_m (vbool4_t vm, vuint16m4_t vs2, size_t vl); +vuint16m8_t __riscv_vbrev_v_u16m8_m (vbool2_t vm, vuint16m8_t vs2, size_t vl); +vuint32mf2_t __riscv_vbrev_v_u32mf2_m (vbool64_t vm, vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vbrev_v_u32m1_m (vbool32_t vm, vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vbrev_v_u32m2_m (vbool16_t vm, vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vbrev_v_u32m4_m (vbool8_t vm, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vbrev_v_u32m8_m (vbool4_t vm, vuint32m8_t vs2, size_t vl); +vuint64m1_t __riscv_vbrev_v_u64m1_m (vbool64_t vm, 
vuint64m1_t vs2, size_t vl); +vuint64m2_t __riscv_vbrev_v_u64m2_m (vbool32_t vm, vuint64m2_t vs2, size_t vl); +vuint64m4_t __riscv_vbrev_v_u64m4_m (vbool16_t vm, vuint64m4_t vs2, size_t vl); +vuint64m8_t __riscv_vbrev_v_u64m8_m (vbool8_t vm, vuint64m8_t vs2, size_t vl); +vuint8mf8_t __riscv_vbrev8_v_u8mf8_m (vbool64_t vm, vuint8mf8_t vs2, size_t vl); +vuint8mf4_t __riscv_vbrev8_v_u8mf4_m (vbool32_t vm, vuint8mf4_t vs2, size_t vl); +vuint8mf2_t __riscv_vbrev8_v_u8mf2_m (vbool16_t vm, vuint8mf2_t vs2, size_t vl); +vuint8m1_t __riscv_vbrev8_v_u8m1_m (vbool8_t vm, vuint8m1_t vs2, size_t vl); +vuint8m2_t __riscv_vbrev8_v_u8m2_m (vbool4_t vm, vuint8m2_t vs2, size_t vl); +vuint8m4_t __riscv_vbrev8_v_u8m4_m (vbool2_t vm, vuint8m4_t vs2, size_t vl); +vuint8m8_t __riscv_vbrev8_v_u8m8_m (vbool1_t vm, vuint8m8_t vs2, size_t vl); +vuint16mf4_t __riscv_vbrev8_v_u16mf4_m (vbool64_t vm, vuint16mf4_t vs2, size_t vl); +vuint16mf2_t __riscv_vbrev8_v_u16mf2_m (vbool32_t vm, vuint16mf2_t vs2, size_t vl); +vuint16m1_t __riscv_vbrev8_v_u16m1_m (vbool16_t vm, vuint16m1_t vs2, size_t vl); +vuint16m2_t __riscv_vbrev8_v_u16m2_m (vbool8_t vm, vuint16m2_t vs2, size_t vl); +vuint16m4_t __riscv_vbrev8_v_u16m4_m (vbool4_t vm, vuint16m4_t vs2, size_t vl); +vuint16m8_t __riscv_vbrev8_v_u16m8_m (vbool2_t vm, vuint16m8_t vs2, size_t vl); +vuint32mf2_t __riscv_vbrev8_v_u32mf2_m (vbool64_t vm, vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vbrev8_v_u32m1_m (vbool32_t vm, vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vbrev8_v_u32m2_m (vbool16_t vm, vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vbrev8_v_u32m4_m (vbool8_t vm, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vbrev8_v_u32m8_m (vbool4_t vm, vuint32m8_t vs2, size_t vl); +vuint64m1_t __riscv_vbrev8_v_u64m1_m (vbool64_t vm, vuint64m1_t vs2, size_t vl); +vuint64m2_t __riscv_vbrev8_v_u64m2_m (vbool32_t vm, vuint64m2_t vs2, size_t vl); +vuint64m4_t __riscv_vbrev8_v_u64m4_m (vbool16_t vm, vuint64m4_t vs2, size_t vl); +vuint64m8_t __riscv_vbrev8_v_u64m8_m (vbool8_t vm, vuint64m8_t vs2, size_t vl); +vuint8mf8_t __riscv_vrev8_v_u8mf8_m (vbool64_t vm, vuint8mf8_t vs2, size_t vl); +vuint8mf4_t __riscv_vrev8_v_u8mf4_m (vbool32_t vm, vuint8mf4_t vs2, size_t vl); +vuint8mf2_t __riscv_vrev8_v_u8mf2_m (vbool16_t vm, vuint8mf2_t vs2, size_t vl); +vuint8m1_t __riscv_vrev8_v_u8m1_m (vbool8_t vm, vuint8m1_t vs2, size_t vl); +vuint8m2_t __riscv_vrev8_v_u8m2_m (vbool4_t vm, vuint8m2_t vs2, size_t vl); +vuint8m4_t __riscv_vrev8_v_u8m4_m (vbool2_t vm, vuint8m4_t vs2, size_t vl); +vuint8m8_t __riscv_vrev8_v_u8m8_m (vbool1_t vm, vuint8m8_t vs2, size_t vl); +vuint16mf4_t __riscv_vrev8_v_u16mf4_m (vbool64_t vm, vuint16mf4_t vs2, size_t vl); +vuint16mf2_t __riscv_vrev8_v_u16mf2_m (vbool32_t vm, vuint16mf2_t vs2, size_t vl); +vuint16m1_t __riscv_vrev8_v_u16m1_m (vbool16_t vm, vuint16m1_t vs2, size_t vl); +vuint16m2_t __riscv_vrev8_v_u16m2_m (vbool8_t vm, vuint16m2_t vs2, size_t vl); +vuint16m4_t __riscv_vrev8_v_u16m4_m (vbool4_t vm, vuint16m4_t vs2, size_t vl); +vuint16m8_t __riscv_vrev8_v_u16m8_m (vbool2_t vm, vuint16m8_t vs2, size_t vl); +vuint32mf2_t __riscv_vrev8_v_u32mf2_m (vbool64_t vm, vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vrev8_v_u32m1_m (vbool32_t vm, vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vrev8_v_u32m2_m (vbool16_t vm, vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vrev8_v_u32m4_m (vbool8_t vm, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vrev8_v_u32m8_m (vbool4_t vm, vuint32m8_t vs2, size_t vl); +vuint64m1_t __riscv_vrev8_v_u64m1_m 
(vbool64_t vm, vuint64m1_t vs2, size_t vl); +vuint64m2_t __riscv_vrev8_v_u64m2_m (vbool32_t vm, vuint64m2_t vs2, size_t vl); +vuint64m4_t __riscv_vrev8_v_u64m4_m (vbool16_t vm, vuint64m4_t vs2, size_t vl); +vuint64m8_t __riscv_vrev8_v_u64m8_m (vbool8_t vm, vuint64m8_t vs2, size_t vl); +---- + +[[]] +==== Vector Basic Bit-manipulation - Count Bits + +[,c] +---- +vuint8mf8_t __riscv_vclz_v_u8mf8 (vuint8mf8_t vs2, size_t vl); +vuint8mf4_t __riscv_vclz_v_u8mf4 (vuint8mf4_t vs2, size_t vl); +vuint8mf2_t __riscv_vclz_v_u8mf2 (vuint8mf2_t vs2, size_t vl); +vuint8m1_t __riscv_vclz_v_u8m1 (vuint8m1_t vs2, size_t vl); +vuint8m2_t __riscv_vclz_v_u8m2 (vuint8m2_t vs2, size_t vl); +vuint8m4_t __riscv_vclz_v_u8m4 (vuint8m4_t vs2, size_t vl); +vuint8m8_t __riscv_vclz_v_u8m8 (vuint8m8_t vs2, size_t vl); +vuint16mf4_t __riscv_vclz_v_u16mf4 (vuint16mf4_t vs2, size_t vl); +vuint16mf2_t __riscv_vclz_v_u16mf2 (vuint16mf2_t vs2, size_t vl); +vuint16m1_t __riscv_vclz_v_u16m1 (vuint16m1_t vs2, size_t vl); +vuint16m2_t __riscv_vclz_v_u16m2 (vuint16m2_t vs2, size_t vl); +vuint16m4_t __riscv_vclz_v_u16m4 (vuint16m4_t vs2, size_t vl); +vuint16m8_t __riscv_vclz_v_u16m8 (vuint16m8_t vs2, size_t vl); +vuint32mf2_t __riscv_vclz_v_u32mf2 (vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vclz_v_u32m1 (vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vclz_v_u32m2 (vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vclz_v_u32m4 (vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vclz_v_u32m8 (vuint32m8_t vs2, size_t vl); +vuint64m1_t __riscv_vclz_v_u64m1 (vuint64m1_t vs2, size_t vl); +vuint64m2_t __riscv_vclz_v_u64m2 (vuint64m2_t vs2, size_t vl); +vuint64m4_t __riscv_vclz_v_u64m4 (vuint64m4_t vs2, size_t vl); +vuint64m8_t __riscv_vclz_v_u64m8 (vuint64m8_t vs2, size_t vl); +vuint8mf8_t __riscv_vctz_v_u8mf8 (vuint8mf8_t vs2, size_t vl); +vuint8mf4_t __riscv_vctz_v_u8mf4 (vuint8mf4_t vs2, size_t vl); +vuint8mf2_t __riscv_vctz_v_u8mf2 (vuint8mf2_t vs2, size_t vl); +vuint8m1_t __riscv_vctz_v_u8m1 (vuint8m1_t vs2, size_t vl); +vuint8m2_t __riscv_vctz_v_u8m2 (vuint8m2_t vs2, size_t vl); +vuint8m4_t __riscv_vctz_v_u8m4 (vuint8m4_t vs2, size_t vl); +vuint8m8_t __riscv_vctz_v_u8m8 (vuint8m8_t vs2, size_t vl); +vuint16mf4_t __riscv_vctz_v_u16mf4 (vuint16mf4_t vs2, size_t vl); +vuint16mf2_t __riscv_vctz_v_u16mf2 (vuint16mf2_t vs2, size_t vl); +vuint16m1_t __riscv_vctz_v_u16m1 (vuint16m1_t vs2, size_t vl); +vuint16m2_t __riscv_vctz_v_u16m2 (vuint16m2_t vs2, size_t vl); +vuint16m4_t __riscv_vctz_v_u16m4 (vuint16m4_t vs2, size_t vl); +vuint16m8_t __riscv_vctz_v_u16m8 (vuint16m8_t vs2, size_t vl); +vuint32mf2_t __riscv_vctz_v_u32mf2 (vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vctz_v_u32m1 (vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vctz_v_u32m2 (vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vctz_v_u32m4 (vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vctz_v_u32m8 (vuint32m8_t vs2, size_t vl); +vuint64m1_t __riscv_vctz_v_u64m1 (vuint64m1_t vs2, size_t vl); +vuint64m2_t __riscv_vctz_v_u64m2 (vuint64m2_t vs2, size_t vl); +vuint64m4_t __riscv_vctz_v_u64m4 (vuint64m4_t vs2, size_t vl); +vuint64m8_t __riscv_vctz_v_u64m8 (vuint64m8_t vs2, size_t vl); +// masked functions +vuint8mf8_t __riscv_vclz_v_u8mf8_m (vbool64_t vm, vuint8mf8_t vs2, size_t vl); +vuint8mf4_t __riscv_vclz_v_u8mf4_m (vbool32_t vm, vuint8mf4_t vs2, size_t vl); +vuint8mf2_t __riscv_vclz_v_u8mf2_m (vbool16_t vm, vuint8mf2_t vs2, size_t vl); +vuint8m1_t __riscv_vclz_v_u8m1_m (vbool8_t vm, vuint8m1_t vs2, size_t vl); +vuint8m2_t __riscv_vclz_v_u8m2_m (vbool4_t 
vm, vuint8m2_t vs2, size_t vl); +vuint8m4_t __riscv_vclz_v_u8m4_m (vbool2_t vm, vuint8m4_t vs2, size_t vl); +vuint8m8_t __riscv_vclz_v_u8m8_m (vbool1_t vm, vuint8m8_t vs2, size_t vl); +vuint16mf4_t __riscv_vclz_v_u16mf4_m (vbool64_t vm, vuint16mf4_t vs2, size_t vl); +vuint16mf2_t __riscv_vclz_v_u16mf2_m (vbool32_t vm, vuint16mf2_t vs2, size_t vl); +vuint16m1_t __riscv_vclz_v_u16m1_m (vbool16_t vm, vuint16m1_t vs2, size_t vl); +vuint16m2_t __riscv_vclz_v_u16m2_m (vbool8_t vm, vuint16m2_t vs2, size_t vl); +vuint16m4_t __riscv_vclz_v_u16m4_m (vbool4_t vm, vuint16m4_t vs2, size_t vl); +vuint16m8_t __riscv_vclz_v_u16m8_m (vbool2_t vm, vuint16m8_t vs2, size_t vl); +vuint32mf2_t __riscv_vclz_v_u32mf2_m (vbool64_t vm, vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vclz_v_u32m1_m (vbool32_t vm, vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vclz_v_u32m2_m (vbool16_t vm, vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vclz_v_u32m4_m (vbool8_t vm, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vclz_v_u32m8_m (vbool4_t vm, vuint32m8_t vs2, size_t vl); +vuint64m1_t __riscv_vclz_v_u64m1_m (vbool64_t vm, vuint64m1_t vs2, size_t vl); +vuint64m2_t __riscv_vclz_v_u64m2_m (vbool32_t vm, vuint64m2_t vs2, size_t vl); +vuint64m4_t __riscv_vclz_v_u64m4_m (vbool16_t vm, vuint64m4_t vs2, size_t vl); +vuint64m8_t __riscv_vclz_v_u64m8_m (vbool8_t vm, vuint64m8_t vs2, size_t vl); +vuint8mf8_t __riscv_vctz_v_u8mf8_m (vbool64_t vm, vuint8mf8_t vs2, size_t vl); +vuint8mf4_t __riscv_vctz_v_u8mf4_m (vbool32_t vm, vuint8mf4_t vs2, size_t vl); +vuint8mf2_t __riscv_vctz_v_u8mf2_m (vbool16_t vm, vuint8mf2_t vs2, size_t vl); +vuint8m1_t __riscv_vctz_v_u8m1_m (vbool8_t vm, vuint8m1_t vs2, size_t vl); +vuint8m2_t __riscv_vctz_v_u8m2_m (vbool4_t vm, vuint8m2_t vs2, size_t vl); +vuint8m4_t __riscv_vctz_v_u8m4_m (vbool2_t vm, vuint8m4_t vs2, size_t vl); +vuint8m8_t __riscv_vctz_v_u8m8_m (vbool1_t vm, vuint8m8_t vs2, size_t vl); +vuint16mf4_t __riscv_vctz_v_u16mf4_m (vbool64_t vm, vuint16mf4_t vs2, size_t vl); +vuint16mf2_t __riscv_vctz_v_u16mf2_m (vbool32_t vm, vuint16mf2_t vs2, size_t vl); +vuint16m1_t __riscv_vctz_v_u16m1_m (vbool16_t vm, vuint16m1_t vs2, size_t vl); +vuint16m2_t __riscv_vctz_v_u16m2_m (vbool8_t vm, vuint16m2_t vs2, size_t vl); +vuint16m4_t __riscv_vctz_v_u16m4_m (vbool4_t vm, vuint16m4_t vs2, size_t vl); +vuint16m8_t __riscv_vctz_v_u16m8_m (vbool2_t vm, vuint16m8_t vs2, size_t vl); +vuint32mf2_t __riscv_vctz_v_u32mf2_m (vbool64_t vm, vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vctz_v_u32m1_m (vbool32_t vm, vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vctz_v_u32m2_m (vbool16_t vm, vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vctz_v_u32m4_m (vbool8_t vm, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vctz_v_u32m8_m (vbool4_t vm, vuint32m8_t vs2, size_t vl); +vuint64m1_t __riscv_vctz_v_u64m1_m (vbool64_t vm, vuint64m1_t vs2, size_t vl); +vuint64m2_t __riscv_vctz_v_u64m2_m (vbool32_t vm, vuint64m2_t vs2, size_t vl); +vuint64m4_t __riscv_vctz_v_u64m4_m (vbool16_t vm, vuint64m4_t vs2, size_t vl); +vuint64m8_t __riscv_vctz_v_u64m8_m (vbool8_t vm, vuint64m8_t vs2, size_t vl); +---- + +[[]] +==== Vector Basic Bit-manipulation - Vector Population Count + +[,c] +---- +vuint8mf8_t __riscv_vcpop_v_u8mf8 (vuint8mf8_t vs2, size_t vl); +vuint8mf4_t __riscv_vcpop_v_u8mf4 (vuint8mf4_t vs2, size_t vl); +vuint8mf2_t __riscv_vcpop_v_u8mf2 (vuint8mf2_t vs2, size_t vl); +vuint8m1_t __riscv_vcpop_v_u8m1 (vuint8m1_t vs2, size_t vl); +vuint8m2_t __riscv_vcpop_v_u8m2 (vuint8m2_t vs2, size_t vl); +vuint8m4_t 
__riscv_vcpop_v_u8m4 (vuint8m4_t vs2, size_t vl); +vuint8m8_t __riscv_vcpop_v_u8m8 (vuint8m8_t vs2, size_t vl); +vuint16mf4_t __riscv_vcpop_v_u16mf4 (vuint16mf4_t vs2, size_t vl); +vuint16mf2_t __riscv_vcpop_v_u16mf2 (vuint16mf2_t vs2, size_t vl); +vuint16m1_t __riscv_vcpop_v_u16m1 (vuint16m1_t vs2, size_t vl); +vuint16m2_t __riscv_vcpop_v_u16m2 (vuint16m2_t vs2, size_t vl); +vuint16m4_t __riscv_vcpop_v_u16m4 (vuint16m4_t vs2, size_t vl); +vuint16m8_t __riscv_vcpop_v_u16m8 (vuint16m8_t vs2, size_t vl); +vuint32mf2_t __riscv_vcpop_v_u32mf2 (vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vcpop_v_u32m1 (vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vcpop_v_u32m2 (vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vcpop_v_u32m4 (vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vcpop_v_u32m8 (vuint32m8_t vs2, size_t vl); +vuint64m1_t __riscv_vcpop_v_u64m1 (vuint64m1_t vs2, size_t vl); +vuint64m2_t __riscv_vcpop_v_u64m2 (vuint64m2_t vs2, size_t vl); +vuint64m4_t __riscv_vcpop_v_u64m4 (vuint64m4_t vs2, size_t vl); +vuint64m8_t __riscv_vcpop_v_u64m8 (vuint64m8_t vs2, size_t vl); +// masked functions +vuint8mf8_t __riscv_vcpop_v_u8mf8_m (vbool64_t vm, vuint8mf8_t vs2, size_t vl); +vuint8mf4_t __riscv_vcpop_v_u8mf4_m (vbool32_t vm, vuint8mf4_t vs2, size_t vl); +vuint8mf2_t __riscv_vcpop_v_u8mf2_m (vbool16_t vm, vuint8mf2_t vs2, size_t vl); +vuint8m1_t __riscv_vcpop_v_u8m1_m (vbool8_t vm, vuint8m1_t vs2, size_t vl); +vuint8m2_t __riscv_vcpop_v_u8m2_m (vbool4_t vm, vuint8m2_t vs2, size_t vl); +vuint8m4_t __riscv_vcpop_v_u8m4_m (vbool2_t vm, vuint8m4_t vs2, size_t vl); +vuint8m8_t __riscv_vcpop_v_u8m8_m (vbool1_t vm, vuint8m8_t vs2, size_t vl); +vuint16mf4_t __riscv_vcpop_v_u16mf4_m (vbool64_t vm, vuint16mf4_t vs2, size_t vl); +vuint16mf2_t __riscv_vcpop_v_u16mf2_m (vbool32_t vm, vuint16mf2_t vs2, size_t vl); +vuint16m1_t __riscv_vcpop_v_u16m1_m (vbool16_t vm, vuint16m1_t vs2, size_t vl); +vuint16m2_t __riscv_vcpop_v_u16m2_m (vbool8_t vm, vuint16m2_t vs2, size_t vl); +vuint16m4_t __riscv_vcpop_v_u16m4_m (vbool4_t vm, vuint16m4_t vs2, size_t vl); +vuint16m8_t __riscv_vcpop_v_u16m8_m (vbool2_t vm, vuint16m8_t vs2, size_t vl); +vuint32mf2_t __riscv_vcpop_v_u32mf2_m (vbool64_t vm, vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vcpop_v_u32m1_m (vbool32_t vm, vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vcpop_v_u32m2_m (vbool16_t vm, vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vcpop_v_u32m4_m (vbool8_t vm, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vcpop_v_u32m8_m (vbool4_t vm, vuint32m8_t vs2, size_t vl); +vuint64m1_t __riscv_vcpop_v_u64m1_m (vbool64_t vm, vuint64m1_t vs2, size_t vl); +vuint64m2_t __riscv_vcpop_v_u64m2_m (vbool32_t vm, vuint64m2_t vs2, size_t vl); +vuint64m4_t __riscv_vcpop_v_u64m4_m (vbool16_t vm, vuint64m4_t vs2, size_t vl); +vuint64m8_t __riscv_vcpop_v_u64m8_m (vbool8_t vm, vuint64m8_t vs2, size_t vl); +---- + +[[]] +==== Vector Bit-manipulation used in Cryptography - Rotate + +[,c] +---- +vuint8mf8_t __riscv_vrol_vv_u8mf8 (vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl); +vuint8mf8_t __riscv_vrol_vx_u8mf8 (vuint8mf8_t vs2, size_t rs1, size_t vl); +vuint8mf4_t __riscv_vrol_vv_u8mf4 (vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl); +vuint8mf4_t __riscv_vrol_vx_u8mf4 (vuint8mf4_t vs2, size_t rs1, size_t vl); +vuint8mf2_t __riscv_vrol_vv_u8mf2 (vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl); +vuint8mf2_t __riscv_vrol_vx_u8mf2 (vuint8mf2_t vs2, size_t rs1, size_t vl); +vuint8m1_t __riscv_vrol_vv_u8m1 (vuint8m1_t vs2, vuint8m1_t vs1, size_t vl); +vuint8m1_t 
__riscv_vrol_vx_u8m1 (vuint8m1_t vs2, size_t rs1, size_t vl); +vuint8m2_t __riscv_vrol_vv_u8m2 (vuint8m2_t vs2, vuint8m2_t vs1, size_t vl); +vuint8m2_t __riscv_vrol_vx_u8m2 (vuint8m2_t vs2, size_t rs1, size_t vl); +vuint8m4_t __riscv_vrol_vv_u8m4 (vuint8m4_t vs2, vuint8m4_t vs1, size_t vl); +vuint8m4_t __riscv_vrol_vx_u8m4 (vuint8m4_t vs2, size_t rs1, size_t vl); +vuint8m8_t __riscv_vrol_vv_u8m8 (vuint8m8_t vs2, vuint8m8_t vs1, size_t vl); +vuint8m8_t __riscv_vrol_vx_u8m8 (vuint8m8_t vs2, size_t rs1, size_t vl); +vuint16mf4_t __riscv_vrol_vv_u16mf4 (vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl); +vuint16mf4_t __riscv_vrol_vx_u16mf4 (vuint16mf4_t vs2, size_t rs1, size_t vl); +vuint16mf2_t __riscv_vrol_vv_u16mf2 (vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl); +vuint16mf2_t __riscv_vrol_vx_u16mf2 (vuint16mf2_t vs2, size_t rs1, size_t vl); +vuint16m1_t __riscv_vrol_vv_u16m1 (vuint16m1_t vs2, vuint16m1_t vs1, size_t vl); +vuint16m1_t __riscv_vrol_vx_u16m1 (vuint16m1_t vs2, size_t rs1, size_t vl); +vuint16m2_t __riscv_vrol_vv_u16m2 (vuint16m2_t vs2, vuint16m2_t vs1, size_t vl); +vuint16m2_t __riscv_vrol_vx_u16m2 (vuint16m2_t vs2, size_t rs1, size_t vl); +vuint16m4_t __riscv_vrol_vv_u16m4 (vuint16m4_t vs2, vuint16m4_t vs1, size_t vl); +vuint16m4_t __riscv_vrol_vx_u16m4 (vuint16m4_t vs2, size_t rs1, size_t vl); +vuint16m8_t __riscv_vrol_vv_u16m8 (vuint16m8_t vs2, vuint16m8_t vs1, size_t vl); +vuint16m8_t __riscv_vrol_vx_u16m8 (vuint16m8_t vs2, size_t rs1, size_t vl); +vuint32mf2_t __riscv_vrol_vv_u32mf2 (vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl); +vuint32mf2_t __riscv_vrol_vx_u32mf2 (vuint32mf2_t vs2, size_t rs1, size_t vl); +vuint32m1_t __riscv_vrol_vv_u32m1 (vuint32m1_t vs2, vuint32m1_t vs1, size_t vl); +vuint32m1_t __riscv_vrol_vx_u32m1 (vuint32m1_t vs2, size_t rs1, size_t vl); +vuint32m2_t __riscv_vrol_vv_u32m2 (vuint32m2_t vs2, vuint32m2_t vs1, size_t vl); +vuint32m2_t __riscv_vrol_vx_u32m2 (vuint32m2_t vs2, size_t rs1, size_t vl); +vuint32m4_t __riscv_vrol_vv_u32m4 (vuint32m4_t vs2, vuint32m4_t vs1, size_t vl); +vuint32m4_t __riscv_vrol_vx_u32m4 (vuint32m4_t vs2, size_t rs1, size_t vl); +vuint32m8_t __riscv_vrol_vv_u32m8 (vuint32m8_t vs2, vuint32m8_t vs1, size_t vl); +vuint32m8_t __riscv_vrol_vx_u32m8 (vuint32m8_t vs2, size_t rs1, size_t vl); +vuint64m1_t __riscv_vrol_vv_u64m1 (vuint64m1_t vs2, vuint64m1_t vs1, size_t vl); +vuint64m1_t __riscv_vrol_vx_u64m1 (vuint64m1_t vs2, size_t rs1, size_t vl); +vuint64m2_t __riscv_vrol_vv_u64m2 (vuint64m2_t vs2, vuint64m2_t vs1, size_t vl); +vuint64m2_t __riscv_vrol_vx_u64m2 (vuint64m2_t vs2, size_t rs1, size_t vl); +vuint64m4_t __riscv_vrol_vv_u64m4 (vuint64m4_t vs2, vuint64m4_t vs1, size_t vl); +vuint64m4_t __riscv_vrol_vx_u64m4 (vuint64m4_t vs2, size_t rs1, size_t vl); +vuint64m8_t __riscv_vrol_vv_u64m8 (vuint64m8_t vs2, vuint64m8_t vs1, size_t vl); +vuint64m8_t __riscv_vrol_vx_u64m8 (vuint64m8_t vs2, size_t rs1, size_t vl); +vuint8mf8_t __riscv_vror_vv_u8mf8 (vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl); +vuint8mf8_t __riscv_vror_vx_u8mf8 (vuint8mf8_t vs2, size_t rs1, size_t vl); +vuint8mf4_t __riscv_vror_vv_u8mf4 (vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl); +vuint8mf4_t __riscv_vror_vx_u8mf4 (vuint8mf4_t vs2, size_t rs1, size_t vl); +vuint8mf2_t __riscv_vror_vv_u8mf2 (vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl); +vuint8mf2_t __riscv_vror_vx_u8mf2 (vuint8mf2_t vs2, size_t rs1, size_t vl); +vuint8m1_t __riscv_vror_vv_u8m1 (vuint8m1_t vs2, vuint8m1_t vs1, size_t vl); +vuint8m1_t __riscv_vror_vx_u8m1 (vuint8m1_t vs2, size_t rs1, size_t vl); 
+vuint8m2_t __riscv_vror_vv_u8m2 (vuint8m2_t vs2, vuint8m2_t vs1, size_t vl); +vuint8m2_t __riscv_vror_vx_u8m2 (vuint8m2_t vs2, size_t rs1, size_t vl); +vuint8m4_t __riscv_vror_vv_u8m4 (vuint8m4_t vs2, vuint8m4_t vs1, size_t vl); +vuint8m4_t __riscv_vror_vx_u8m4 (vuint8m4_t vs2, size_t rs1, size_t vl); +vuint8m8_t __riscv_vror_vv_u8m8 (vuint8m8_t vs2, vuint8m8_t vs1, size_t vl); +vuint8m8_t __riscv_vror_vx_u8m8 (vuint8m8_t vs2, size_t rs1, size_t vl); +vuint16mf4_t __riscv_vror_vv_u16mf4 (vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl); +vuint16mf4_t __riscv_vror_vx_u16mf4 (vuint16mf4_t vs2, size_t rs1, size_t vl); +vuint16mf2_t __riscv_vror_vv_u16mf2 (vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl); +vuint16mf2_t __riscv_vror_vx_u16mf2 (vuint16mf2_t vs2, size_t rs1, size_t vl); +vuint16m1_t __riscv_vror_vv_u16m1 (vuint16m1_t vs2, vuint16m1_t vs1, size_t vl); +vuint16m1_t __riscv_vror_vx_u16m1 (vuint16m1_t vs2, size_t rs1, size_t vl); +vuint16m2_t __riscv_vror_vv_u16m2 (vuint16m2_t vs2, vuint16m2_t vs1, size_t vl); +vuint16m2_t __riscv_vror_vx_u16m2 (vuint16m2_t vs2, size_t rs1, size_t vl); +vuint16m4_t __riscv_vror_vv_u16m4 (vuint16m4_t vs2, vuint16m4_t vs1, size_t vl); +vuint16m4_t __riscv_vror_vx_u16m4 (vuint16m4_t vs2, size_t rs1, size_t vl); +vuint16m8_t __riscv_vror_vv_u16m8 (vuint16m8_t vs2, vuint16m8_t vs1, size_t vl); +vuint16m8_t __riscv_vror_vx_u16m8 (vuint16m8_t vs2, size_t rs1, size_t vl); +vuint32mf2_t __riscv_vror_vv_u32mf2 (vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl); +vuint32mf2_t __riscv_vror_vx_u32mf2 (vuint32mf2_t vs2, size_t rs1, size_t vl); +vuint32m1_t __riscv_vror_vv_u32m1 (vuint32m1_t vs2, vuint32m1_t vs1, size_t vl); +vuint32m1_t __riscv_vror_vx_u32m1 (vuint32m1_t vs2, size_t rs1, size_t vl); +vuint32m2_t __riscv_vror_vv_u32m2 (vuint32m2_t vs2, vuint32m2_t vs1, size_t vl); +vuint32m2_t __riscv_vror_vx_u32m2 (vuint32m2_t vs2, size_t rs1, size_t vl); +vuint32m4_t __riscv_vror_vv_u32m4 (vuint32m4_t vs2, vuint32m4_t vs1, size_t vl); +vuint32m4_t __riscv_vror_vx_u32m4 (vuint32m4_t vs2, size_t rs1, size_t vl); +vuint32m8_t __riscv_vror_vv_u32m8 (vuint32m8_t vs2, vuint32m8_t vs1, size_t vl); +vuint32m8_t __riscv_vror_vx_u32m8 (vuint32m8_t vs2, size_t rs1, size_t vl); +vuint64m1_t __riscv_vror_vv_u64m1 (vuint64m1_t vs2, vuint64m1_t vs1, size_t vl); +vuint64m1_t __riscv_vror_vx_u64m1 (vuint64m1_t vs2, size_t rs1, size_t vl); +vuint64m2_t __riscv_vror_vv_u64m2 (vuint64m2_t vs2, vuint64m2_t vs1, size_t vl); +vuint64m2_t __riscv_vror_vx_u64m2 (vuint64m2_t vs2, size_t rs1, size_t vl); +vuint64m4_t __riscv_vror_vv_u64m4 (vuint64m4_t vs2, vuint64m4_t vs1, size_t vl); +vuint64m4_t __riscv_vror_vx_u64m4 (vuint64m4_t vs2, size_t rs1, size_t vl); +vuint64m8_t __riscv_vror_vv_u64m8 (vuint64m8_t vs2, vuint64m8_t vs1, size_t vl); +vuint64m8_t __riscv_vror_vx_u64m8 (vuint64m8_t vs2, size_t rs1, size_t vl); +// masked functions +vuint8mf8_t __riscv_vrol_vv_u8mf8_m (vbool64_t vm, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl); +vuint8mf8_t __riscv_vrol_vx_u8mf8_m (vbool64_t vm, vuint8mf8_t vs2, size_t rs1, size_t vl); +vuint8mf4_t __riscv_vrol_vv_u8mf4_m (vbool32_t vm, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl); +vuint8mf4_t __riscv_vrol_vx_u8mf4_m (vbool32_t vm, vuint8mf4_t vs2, size_t rs1, size_t vl); +vuint8mf2_t __riscv_vrol_vv_u8mf2_m (vbool16_t vm, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl); +vuint8mf2_t __riscv_vrol_vx_u8mf2_m (vbool16_t vm, vuint8mf2_t vs2, size_t rs1, size_t vl); +vuint8m1_t __riscv_vrol_vv_u8m1_m (vbool8_t vm, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl); 
+vuint8m1_t __riscv_vrol_vx_u8m1_m (vbool8_t vm, vuint8m1_t vs2, size_t rs1, size_t vl); +vuint8m2_t __riscv_vrol_vv_u8m2_m (vbool4_t vm, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl); +vuint8m2_t __riscv_vrol_vx_u8m2_m (vbool4_t vm, vuint8m2_t vs2, size_t rs1, size_t vl); +vuint8m4_t __riscv_vrol_vv_u8m4_m (vbool2_t vm, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl); +vuint8m4_t __riscv_vrol_vx_u8m4_m (vbool2_t vm, vuint8m4_t vs2, size_t rs1, size_t vl); +vuint8m8_t __riscv_vrol_vv_u8m8_m (vbool1_t vm, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl); +vuint8m8_t __riscv_vrol_vx_u8m8_m (vbool1_t vm, vuint8m8_t vs2, size_t rs1, size_t vl); +vuint16mf4_t __riscv_vrol_vv_u16mf4_m (vbool64_t vm, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl); +vuint16mf4_t __riscv_vrol_vx_u16mf4_m (vbool64_t vm, vuint16mf4_t vs2, size_t rs1, size_t vl); +vuint16mf2_t __riscv_vrol_vv_u16mf2_m (vbool32_t vm, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl); +vuint16mf2_t __riscv_vrol_vx_u16mf2_m (vbool32_t vm, vuint16mf2_t vs2, size_t rs1, size_t vl); +vuint16m1_t __riscv_vrol_vv_u16m1_m (vbool16_t vm, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl); +vuint16m1_t __riscv_vrol_vx_u16m1_m (vbool16_t vm, vuint16m1_t vs2, size_t rs1, size_t vl); +vuint16m2_t __riscv_vrol_vv_u16m2_m (vbool8_t vm, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl); +vuint16m2_t __riscv_vrol_vx_u16m2_m (vbool8_t vm, vuint16m2_t vs2, size_t rs1, size_t vl); +vuint16m4_t __riscv_vrol_vv_u16m4_m (vbool4_t vm, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl); +vuint16m4_t __riscv_vrol_vx_u16m4_m (vbool4_t vm, vuint16m4_t vs2, size_t rs1, size_t vl); +vuint16m8_t __riscv_vrol_vv_u16m8_m (vbool2_t vm, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl); +vuint16m8_t __riscv_vrol_vx_u16m8_m (vbool2_t vm, vuint16m8_t vs2, size_t rs1, size_t vl); +vuint32mf2_t __riscv_vrol_vv_u32mf2_m (vbool64_t vm, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl); +vuint32mf2_t __riscv_vrol_vx_u32mf2_m (vbool64_t vm, vuint32mf2_t vs2, size_t rs1, size_t vl); +vuint32m1_t __riscv_vrol_vv_u32m1_m (vbool32_t vm, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl); +vuint32m1_t __riscv_vrol_vx_u32m1_m (vbool32_t vm, vuint32m1_t vs2, size_t rs1, size_t vl); +vuint32m2_t __riscv_vrol_vv_u32m2_m (vbool16_t vm, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl); +vuint32m2_t __riscv_vrol_vx_u32m2_m (vbool16_t vm, vuint32m2_t vs2, size_t rs1, size_t vl); +vuint32m4_t __riscv_vrol_vv_u32m4_m (vbool8_t vm, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl); +vuint32m4_t __riscv_vrol_vx_u32m4_m (vbool8_t vm, vuint32m4_t vs2, size_t rs1, size_t vl); +vuint32m8_t __riscv_vrol_vv_u32m8_m (vbool4_t vm, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl); +vuint32m8_t __riscv_vrol_vx_u32m8_m (vbool4_t vm, vuint32m8_t vs2, size_t rs1, size_t vl); +vuint64m1_t __riscv_vrol_vv_u64m1_m (vbool64_t vm, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl); +vuint64m1_t __riscv_vrol_vx_u64m1_m (vbool64_t vm, vuint64m1_t vs2, size_t rs1, size_t vl); +vuint64m2_t __riscv_vrol_vv_u64m2_m (vbool32_t vm, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl); +vuint64m2_t __riscv_vrol_vx_u64m2_m (vbool32_t vm, vuint64m2_t vs2, size_t rs1, size_t vl); +vuint64m4_t __riscv_vrol_vv_u64m4_m (vbool16_t vm, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl); +vuint64m4_t __riscv_vrol_vx_u64m4_m (vbool16_t vm, vuint64m4_t vs2, size_t rs1, size_t vl); +vuint64m8_t __riscv_vrol_vv_u64m8_m (vbool8_t vm, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl); +vuint64m8_t __riscv_vrol_vx_u64m8_m (vbool8_t vm, vuint64m8_t vs2, size_t rs1, size_t vl); +vuint8mf8_t __riscv_vror_vv_u8mf8_m 
(vbool64_t vm, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl); +vuint8mf8_t __riscv_vror_vx_u8mf8_m (vbool64_t vm, vuint8mf8_t vs2, size_t rs1, size_t vl); +vuint8mf4_t __riscv_vror_vv_u8mf4_m (vbool32_t vm, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl); +vuint8mf4_t __riscv_vror_vx_u8mf4_m (vbool32_t vm, vuint8mf4_t vs2, size_t rs1, size_t vl); +vuint8mf2_t __riscv_vror_vv_u8mf2_m (vbool16_t vm, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl); +vuint8mf2_t __riscv_vror_vx_u8mf2_m (vbool16_t vm, vuint8mf2_t vs2, size_t rs1, size_t vl); +vuint8m1_t __riscv_vror_vv_u8m1_m (vbool8_t vm, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl); +vuint8m1_t __riscv_vror_vx_u8m1_m (vbool8_t vm, vuint8m1_t vs2, size_t rs1, size_t vl); +vuint8m2_t __riscv_vror_vv_u8m2_m (vbool4_t vm, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl); +vuint8m2_t __riscv_vror_vx_u8m2_m (vbool4_t vm, vuint8m2_t vs2, size_t rs1, size_t vl); +vuint8m4_t __riscv_vror_vv_u8m4_m (vbool2_t vm, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl); +vuint8m4_t __riscv_vror_vx_u8m4_m (vbool2_t vm, vuint8m4_t vs2, size_t rs1, size_t vl); +vuint8m8_t __riscv_vror_vv_u8m8_m (vbool1_t vm, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl); +vuint8m8_t __riscv_vror_vx_u8m8_m (vbool1_t vm, vuint8m8_t vs2, size_t rs1, size_t vl); +vuint16mf4_t __riscv_vror_vv_u16mf4_m (vbool64_t vm, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl); +vuint16mf4_t __riscv_vror_vx_u16mf4_m (vbool64_t vm, vuint16mf4_t vs2, size_t rs1, size_t vl); +vuint16mf2_t __riscv_vror_vv_u16mf2_m (vbool32_t vm, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl); +vuint16mf2_t __riscv_vror_vx_u16mf2_m (vbool32_t vm, vuint16mf2_t vs2, size_t rs1, size_t vl); +vuint16m1_t __riscv_vror_vv_u16m1_m (vbool16_t vm, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl); +vuint16m1_t __riscv_vror_vx_u16m1_m (vbool16_t vm, vuint16m1_t vs2, size_t rs1, size_t vl); +vuint16m2_t __riscv_vror_vv_u16m2_m (vbool8_t vm, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl); +vuint16m2_t __riscv_vror_vx_u16m2_m (vbool8_t vm, vuint16m2_t vs2, size_t rs1, size_t vl); +vuint16m4_t __riscv_vror_vv_u16m4_m (vbool4_t vm, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl); +vuint16m4_t __riscv_vror_vx_u16m4_m (vbool4_t vm, vuint16m4_t vs2, size_t rs1, size_t vl); +vuint16m8_t __riscv_vror_vv_u16m8_m (vbool2_t vm, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl); +vuint16m8_t __riscv_vror_vx_u16m8_m (vbool2_t vm, vuint16m8_t vs2, size_t rs1, size_t vl); +vuint32mf2_t __riscv_vror_vv_u32mf2_m (vbool64_t vm, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl); +vuint32mf2_t __riscv_vror_vx_u32mf2_m (vbool64_t vm, vuint32mf2_t vs2, size_t rs1, size_t vl); +vuint32m1_t __riscv_vror_vv_u32m1_m (vbool32_t vm, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl); +vuint32m1_t __riscv_vror_vx_u32m1_m (vbool32_t vm, vuint32m1_t vs2, size_t rs1, size_t vl); +vuint32m2_t __riscv_vror_vv_u32m2_m (vbool16_t vm, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl); +vuint32m2_t __riscv_vror_vx_u32m2_m (vbool16_t vm, vuint32m2_t vs2, size_t rs1, size_t vl); +vuint32m4_t __riscv_vror_vv_u32m4_m (vbool8_t vm, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl); +vuint32m4_t __riscv_vror_vx_u32m4_m (vbool8_t vm, vuint32m4_t vs2, size_t rs1, size_t vl); +vuint32m8_t __riscv_vror_vv_u32m8_m (vbool4_t vm, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl); +vuint32m8_t __riscv_vror_vx_u32m8_m (vbool4_t vm, vuint32m8_t vs2, size_t rs1, size_t vl); +vuint64m1_t __riscv_vror_vv_u64m1_m (vbool64_t vm, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl); +vuint64m1_t __riscv_vror_vx_u64m1_m (vbool64_t vm, vuint64m1_t vs2, size_t rs1, 
size_t vl); +vuint64m2_t __riscv_vror_vv_u64m2_m (vbool32_t vm, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl); +vuint64m2_t __riscv_vror_vx_u64m2_m (vbool32_t vm, vuint64m2_t vs2, size_t rs1, size_t vl); +vuint64m4_t __riscv_vror_vv_u64m4_m (vbool16_t vm, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl); +vuint64m4_t __riscv_vror_vx_u64m4_m (vbool16_t vm, vuint64m4_t vs2, size_t rs1, size_t vl); +vuint64m8_t __riscv_vror_vv_u64m8_m (vbool8_t vm, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl); +vuint64m8_t __riscv_vror_vx_u64m8_m (vbool8_t vm, vuint64m8_t vs2, size_t rs1, size_t vl); +---- + +[[]] +==== Vector Basic Bit-manipulation used - Widening Shift + +[,c] +---- +vuint16mf4_t __riscv_vwsll_vv_u16mf4 (vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl); +vuint16mf4_t __riscv_vwsll_vx_u16mf4 (vuint8mf8_t vs2, size_t rs1, size_t vl); +vuint16mf2_t __riscv_vwsll_vv_u16mf2 (vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl); +vuint16mf2_t __riscv_vwsll_vx_u16mf2 (vuint8mf4_t vs2, size_t rs1, size_t vl); +vuint16m1_t __riscv_vwsll_vv_u16m1 (vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl); +vuint16m1_t __riscv_vwsll_vx_u16m1 (vuint8mf2_t vs2, size_t rs1, size_t vl); +vuint16m2_t __riscv_vwsll_vv_u16m2 (vuint8m1_t vs2, vuint8m1_t vs1, size_t vl); +vuint16m2_t __riscv_vwsll_vx_u16m2 (vuint8m1_t vs2, size_t rs1, size_t vl); +vuint16m4_t __riscv_vwsll_vv_u16m4 (vuint8m2_t vs2, vuint8m2_t vs1, size_t vl); +vuint16m4_t __riscv_vwsll_vx_u16m4 (vuint8m2_t vs2, size_t rs1, size_t vl); +vuint16m8_t __riscv_vwsll_vv_u16m8 (vuint8m4_t vs2, vuint8m4_t vs1, size_t vl); +vuint16m8_t __riscv_vwsll_vx_u16m8 (vuint8m4_t vs2, size_t rs1, size_t vl); +vuint32mf2_t __riscv_vwsll_vv_u32mf2 (vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl); +vuint32mf2_t __riscv_vwsll_vx_u32mf2 (vuint16mf4_t vs2, size_t rs1, size_t vl); +vuint32m1_t __riscv_vwsll_vv_u32m1 (vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl); +vuint32m1_t __riscv_vwsll_vx_u32m1 (vuint16mf2_t vs2, size_t rs1, size_t vl); +vuint32m2_t __riscv_vwsll_vv_u32m2 (vuint16m1_t vs2, vuint16m1_t vs1, size_t vl); +vuint32m2_t __riscv_vwsll_vx_u32m2 (vuint16m1_t vs2, size_t rs1, size_t vl); +vuint32m4_t __riscv_vwsll_vv_u32m4 (vuint16m2_t vs2, vuint16m2_t vs1, size_t vl); +vuint32m4_t __riscv_vwsll_vx_u32m4 (vuint16m2_t vs2, size_t rs1, size_t vl); +vuint32m8_t __riscv_vwsll_vv_u32m8 (vuint16m4_t vs2, vuint16m4_t vs1, size_t vl); +vuint32m8_t __riscv_vwsll_vx_u32m8 (vuint16m4_t vs2, size_t rs1, size_t vl); +vuint64m1_t __riscv_vwsll_vv_u64m1 (vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl); +vuint64m1_t __riscv_vwsll_vx_u64m1 (vuint32mf2_t vs2, size_t rs1, size_t vl); +vuint64m2_t __riscv_vwsll_vv_u64m2 (vuint32m1_t vs2, vuint32m1_t vs1, size_t vl); +vuint64m2_t __riscv_vwsll_vx_u64m2 (vuint32m1_t vs2, size_t rs1, size_t vl); +vuint64m4_t __riscv_vwsll_vv_u64m4 (vuint32m2_t vs2, vuint32m2_t vs1, size_t vl); +vuint64m4_t __riscv_vwsll_vx_u64m4 (vuint32m2_t vs2, size_t rs1, size_t vl); +vuint64m8_t __riscv_vwsll_vv_u64m8 (vuint32m4_t vs2, vuint32m4_t vs1, size_t vl); +vuint64m8_t __riscv_vwsll_vx_u64m8 (vuint32m4_t vs2, size_t rs1, size_t vl); +// masked functions +vuint16mf4_t __riscv_vwsll_vv_u16mf4_m (vbool64_t vm, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl); +vuint16mf4_t __riscv_vwsll_vx_u16mf4_m (vbool64_t vm, vuint8mf8_t vs2, size_t rs1, size_t vl); +vuint16mf2_t __riscv_vwsll_vv_u16mf2_m (vbool32_t vm, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl); +vuint16mf2_t __riscv_vwsll_vx_u16mf2_m (vbool32_t vm, vuint8mf4_t vs2, size_t rs1, size_t vl); +vuint16m1_t __riscv_vwsll_vv_u16m1_m 
(vbool16_t vm, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl); +vuint16m1_t __riscv_vwsll_vx_u16m1_m (vbool16_t vm, vuint8mf2_t vs2, size_t rs1, size_t vl); +vuint16m2_t __riscv_vwsll_vv_u16m2_m (vbool8_t vm, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl); +vuint16m2_t __riscv_vwsll_vx_u16m2_m (vbool8_t vm, vuint8m1_t vs2, size_t rs1, size_t vl); +vuint16m4_t __riscv_vwsll_vv_u16m4_m (vbool4_t vm, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl); +vuint16m4_t __riscv_vwsll_vx_u16m4_m (vbool4_t vm, vuint8m2_t vs2, size_t rs1, size_t vl); +vuint16m8_t __riscv_vwsll_vv_u16m8_m (vbool2_t vm, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl); +vuint16m8_t __riscv_vwsll_vx_u16m8_m (vbool2_t vm, vuint8m4_t vs2, size_t rs1, size_t vl); +vuint32mf2_t __riscv_vwsll_vv_u32mf2_m (vbool64_t vm, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl); +vuint32mf2_t __riscv_vwsll_vx_u32mf2_m (vbool64_t vm, vuint16mf4_t vs2, size_t rs1, size_t vl); +vuint32m1_t __riscv_vwsll_vv_u32m1_m (vbool32_t vm, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl); +vuint32m1_t __riscv_vwsll_vx_u32m1_m (vbool32_t vm, vuint16mf2_t vs2, size_t rs1, size_t vl); +vuint32m2_t __riscv_vwsll_vv_u32m2_m (vbool16_t vm, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl); +vuint32m2_t __riscv_vwsll_vx_u32m2_m (vbool16_t vm, vuint16m1_t vs2, size_t rs1, size_t vl); +vuint32m4_t __riscv_vwsll_vv_u32m4_m (vbool8_t vm, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl); +vuint32m4_t __riscv_vwsll_vx_u32m4_m (vbool8_t vm, vuint16m2_t vs2, size_t rs1, size_t vl); +vuint32m8_t __riscv_vwsll_vv_u32m8_m (vbool4_t vm, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl); +vuint32m8_t __riscv_vwsll_vx_u32m8_m (vbool4_t vm, vuint16m4_t vs2, size_t rs1, size_t vl); +vuint64m1_t __riscv_vwsll_vv_u64m1_m (vbool64_t vm, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl); +vuint64m1_t __riscv_vwsll_vx_u64m1_m (vbool64_t vm, vuint32mf2_t vs2, size_t rs1, size_t vl); +vuint64m2_t __riscv_vwsll_vv_u64m2_m (vbool32_t vm, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl); +vuint64m2_t __riscv_vwsll_vx_u64m2_m (vbool32_t vm, vuint32m1_t vs2, size_t rs1, size_t vl); +vuint64m4_t __riscv_vwsll_vv_u64m4_m (vbool16_t vm, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl); +vuint64m4_t __riscv_vwsll_vx_u64m4_m (vbool16_t vm, vuint32m2_t vs2, size_t rs1, size_t vl); +vuint64m8_t __riscv_vwsll_vv_u64m8_m (vbool8_t vm, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl); +vuint64m8_t __riscv_vwsll_vx_u64m8_m (vbool8_t vm, vuint32m4_t vs2, size_t rs1, size_t vl); +---- diff --git a/auto-generated/vector-crypto/intrinsic_funcs/01_zvbc_-_vector_carryless_multiplication.adoc b/auto-generated/vector-crypto/intrinsic_funcs/01_zvbc_-_vector_carryless_multiplication.adoc new file mode 100644 index 000000000..6e9c0a1b9 --- /dev/null +++ b/auto-generated/vector-crypto/intrinsic_funcs/01_zvbc_-_vector_carryless_multiplication.adoc @@ -0,0 +1,42 @@ + +=== Zvbc - Vector Carryless Multiplication + +[[]] +==== Vector Carryless Multiplication + +[,c] +---- +vuint64m1_t __riscv_vclmul_vv_u64m1 (vuint64m1_t vs2, vuint64m1_t vs1, size_t vl); +vuint64m1_t __riscv_vclmul_vx_u64m1 (vuint64m1_t vs2, uint64_t rs1, size_t vl); +vuint64m2_t __riscv_vclmul_vv_u64m2 (vuint64m2_t vs2, vuint64m2_t vs1, size_t vl); +vuint64m2_t __riscv_vclmul_vx_u64m2 (vuint64m2_t vs2, uint64_t rs1, size_t vl); +vuint64m4_t __riscv_vclmul_vv_u64m4 (vuint64m4_t vs2, vuint64m4_t vs1, size_t vl); +vuint64m4_t __riscv_vclmul_vx_u64m4 (vuint64m4_t vs2, uint64_t rs1, size_t vl); +vuint64m8_t __riscv_vclmul_vv_u64m8 (vuint64m8_t vs2, vuint64m8_t vs1, size_t vl); +vuint64m8_t 
__riscv_vclmul_vx_u64m8 (vuint64m8_t vs2, uint64_t rs1, size_t vl); +vuint64m1_t __riscv_vclmulh_vv_u64m1 (vuint64m1_t vs2, vuint64m1_t vs1, size_t vl); +vuint64m1_t __riscv_vclmulh_vx_u64m1 (vuint64m1_t vs2, uint64_t rs1, size_t vl); +vuint64m2_t __riscv_vclmulh_vv_u64m2 (vuint64m2_t vs2, vuint64m2_t vs1, size_t vl); +vuint64m2_t __riscv_vclmulh_vx_u64m2 (vuint64m2_t vs2, uint64_t rs1, size_t vl); +vuint64m4_t __riscv_vclmulh_vv_u64m4 (vuint64m4_t vs2, vuint64m4_t vs1, size_t vl); +vuint64m4_t __riscv_vclmulh_vx_u64m4 (vuint64m4_t vs2, uint64_t rs1, size_t vl); +vuint64m8_t __riscv_vclmulh_vv_u64m8 (vuint64m8_t vs2, vuint64m8_t vs1, size_t vl); +vuint64m8_t __riscv_vclmulh_vx_u64m8 (vuint64m8_t vs2, uint64_t rs1, size_t vl); +// masked functions +vuint64m1_t __riscv_vclmul_vv_u64m1_m (vbool64_t vm, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl); +vuint64m1_t __riscv_vclmul_vx_u64m1_m (vbool64_t vm, vuint64m1_t vs2, uint64_t rs1, size_t vl); +vuint64m2_t __riscv_vclmul_vv_u64m2_m (vbool32_t vm, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl); +vuint64m2_t __riscv_vclmul_vx_u64m2_m (vbool32_t vm, vuint64m2_t vs2, uint64_t rs1, size_t vl); +vuint64m4_t __riscv_vclmul_vv_u64m4_m (vbool16_t vm, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl); +vuint64m4_t __riscv_vclmul_vx_u64m4_m (vbool16_t vm, vuint64m4_t vs2, uint64_t rs1, size_t vl); +vuint64m8_t __riscv_vclmul_vv_u64m8_m (vbool8_t vm, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl); +vuint64m8_t __riscv_vclmul_vx_u64m8_m (vbool8_t vm, vuint64m8_t vs2, uint64_t rs1, size_t vl); +vuint64m1_t __riscv_vclmulh_vv_u64m1_m (vbool64_t vm, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl); +vuint64m1_t __riscv_vclmulh_vx_u64m1_m (vbool64_t vm, vuint64m1_t vs2, uint64_t rs1, size_t vl); +vuint64m2_t __riscv_vclmulh_vv_u64m2_m (vbool32_t vm, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl); +vuint64m2_t __riscv_vclmulh_vx_u64m2_m (vbool32_t vm, vuint64m2_t vs2, uint64_t rs1, size_t vl); +vuint64m4_t __riscv_vclmulh_vv_u64m4_m (vbool16_t vm, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl); +vuint64m4_t __riscv_vclmulh_vx_u64m4_m (vbool16_t vm, vuint64m4_t vs2, uint64_t rs1, size_t vl); +vuint64m8_t __riscv_vclmulh_vv_u64m8_m (vbool8_t vm, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl); +vuint64m8_t __riscv_vclmulh_vx_u64m8_m (vbool8_t vm, vuint64m8_t vs2, uint64_t rs1, size_t vl); +---- diff --git a/auto-generated/vector-crypto/intrinsic_funcs/02_zvkg_-_vector_gcm_gmac.adoc b/auto-generated/vector-crypto/intrinsic_funcs/02_zvkg_-_vector_gcm_gmac.adoc new file mode 100644 index 000000000..83f9816cb --- /dev/null +++ b/auto-generated/vector-crypto/intrinsic_funcs/02_zvkg_-_vector_gcm_gmac.adoc @@ -0,0 +1,19 @@ + +=== Zvkg - Vector GCM/GMAC + +[[]] +==== Vector GCM/GMAC + +[,c] +---- +vuint32mf2_t __riscv_vghsh_vv_u32mf2 (vuint32mf2_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl); +vuint32m1_t __riscv_vghsh_vv_u32m1 (vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl); +vuint32m2_t __riscv_vghsh_vv_u32m2 (vuint32m2_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl); +vuint32m4_t __riscv_vghsh_vv_u32m4 (vuint32m4_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl); +vuint32m8_t __riscv_vghsh_vv_u32m8 (vuint32m8_t vd, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl); +vuint32mf2_t __riscv_vgmul_vv_u32mf2 (vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vgmul_vv_u32m1 (vuint32m1_t vd, vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vgmul_vv_u32m2 (vuint32m2_t vd, vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vgmul_vv_u32m4 (vuint32m4_t vd, 
vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vgmul_vv_u32m8 (vuint32m8_t vd, vuint32m8_t vs2, size_t vl); +---- diff --git a/auto-generated/vector-crypto/intrinsic_funcs/03_zvkned_-_nist_suite:_vector_aes_block_cipher.adoc b/auto-generated/vector-crypto/intrinsic_funcs/03_zvkned_-_nist_suite:_vector_aes_block_cipher.adoc new file mode 100644 index 000000000..929328cba --- /dev/null +++ b/auto-generated/vector-crypto/intrinsic_funcs/03_zvkned_-_nist_suite:_vector_aes_block_cipher.adoc @@ -0,0 +1,130 @@ + +=== Zvkned - NIST Suite: Vector AES Block Cipher + +[[]] +==== Vector AES Encryption + +[,c] +---- +vuint32mf2_t __riscv_vaesef_vv_u32mf2 (vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); +vuint32mf2_t __riscv_vaesef_vs_u32mf2_u32mf2 (vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vaesef_vs_u32mf2_u32m1 (vuint32m1_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m2_t __riscv_vaesef_vs_u32mf2_u32m2 (vuint32m2_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m4_t __riscv_vaesef_vs_u32mf2_u32m4 (vuint32m4_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m8_t __riscv_vaesef_vs_u32mf2_u32m8 (vuint32m8_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vaesef_vv_u32m1 (vuint32m1_t vd, vuint32m1_t vs2, size_t vl); +vuint32m1_t __riscv_vaesef_vs_u32m1_u32m1 (vuint32m1_t vd, vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vaesef_vs_u32m1_u32m2 (vuint32m2_t vd, vuint32m1_t vs2, size_t vl); +vuint32m4_t __riscv_vaesef_vs_u32m1_u32m4 (vuint32m4_t vd, vuint32m1_t vs2, size_t vl); +vuint32m8_t __riscv_vaesef_vs_u32m1_u32m8 (vuint32m8_t vd, vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vaesef_vv_u32m2 (vuint32m2_t vd, vuint32m2_t vs2, size_t vl); +vuint32m2_t __riscv_vaesef_vs_u32m2_u32m2 (vuint32m2_t vd, vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vaesef_vs_u32m2_u32m4 (vuint32m4_t vd, vuint32m2_t vs2, size_t vl); +vuint32m8_t __riscv_vaesef_vs_u32m2_u32m8 (vuint32m8_t vd, vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vaesef_vv_u32m4 (vuint32m4_t vd, vuint32m4_t vs2, size_t vl); +vuint32m4_t __riscv_vaesef_vs_u32m4_u32m4 (vuint32m4_t vd, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vaesef_vs_u32m4_u32m8 (vuint32m8_t vd, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vaesef_vv_u32m8 (vuint32m8_t vd, vuint32m8_t vs2, size_t vl); +vuint32mf2_t __riscv_vaesem_vv_u32mf2 (vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); +vuint32mf2_t __riscv_vaesem_vs_u32mf2_u32mf2 (vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vaesem_vs_u32mf2_u32m1 (vuint32m1_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m2_t __riscv_vaesem_vs_u32mf2_u32m2 (vuint32m2_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m4_t __riscv_vaesem_vs_u32mf2_u32m4 (vuint32m4_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m8_t __riscv_vaesem_vs_u32mf2_u32m8 (vuint32m8_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vaesem_vv_u32m1 (vuint32m1_t vd, vuint32m1_t vs2, size_t vl); +vuint32m1_t __riscv_vaesem_vs_u32m1_u32m1 (vuint32m1_t vd, vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vaesem_vs_u32m1_u32m2 (vuint32m2_t vd, vuint32m1_t vs2, size_t vl); +vuint32m4_t __riscv_vaesem_vs_u32m1_u32m4 (vuint32m4_t vd, vuint32m1_t vs2, size_t vl); +vuint32m8_t __riscv_vaesem_vs_u32m1_u32m8 (vuint32m8_t vd, vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vaesem_vv_u32m2 (vuint32m2_t vd, vuint32m2_t vs2, size_t vl); +vuint32m2_t __riscv_vaesem_vs_u32m2_u32m2 (vuint32m2_t vd, vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vaesem_vs_u32m2_u32m4 (vuint32m4_t vd, vuint32m2_t vs2, size_t vl); +vuint32m8_t 
__riscv_vaesem_vs_u32m2_u32m8 (vuint32m8_t vd, vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vaesem_vv_u32m4 (vuint32m4_t vd, vuint32m4_t vs2, size_t vl); +vuint32m4_t __riscv_vaesem_vs_u32m4_u32m4 (vuint32m4_t vd, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vaesem_vs_u32m4_u32m8 (vuint32m8_t vd, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vaesem_vv_u32m8 (vuint32m8_t vd, vuint32m8_t vs2, size_t vl); +---- + +[[]] +==== Vector AES Decryption + +[,c] +---- +vuint32mf2_t __riscv_vaesdf_vv_u32mf2 (vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); +vuint32mf2_t __riscv_vaesdf_vs_u32mf2_u32mf2 (vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vaesdf_vs_u32mf2_u32m1 (vuint32m1_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m2_t __riscv_vaesdf_vs_u32mf2_u32m2 (vuint32m2_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m4_t __riscv_vaesdf_vs_u32mf2_u32m4 (vuint32m4_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m8_t __riscv_vaesdf_vs_u32mf2_u32m8 (vuint32m8_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vaesdf_vv_u32m1 (vuint32m1_t vd, vuint32m1_t vs2, size_t vl); +vuint32m1_t __riscv_vaesdf_vs_u32m1_u32m1 (vuint32m1_t vd, vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vaesdf_vs_u32m1_u32m2 (vuint32m2_t vd, vuint32m1_t vs2, size_t vl); +vuint32m4_t __riscv_vaesdf_vs_u32m1_u32m4 (vuint32m4_t vd, vuint32m1_t vs2, size_t vl); +vuint32m8_t __riscv_vaesdf_vs_u32m1_u32m8 (vuint32m8_t vd, vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vaesdf_vv_u32m2 (vuint32m2_t vd, vuint32m2_t vs2, size_t vl); +vuint32m2_t __riscv_vaesdf_vs_u32m2_u32m2 (vuint32m2_t vd, vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vaesdf_vs_u32m2_u32m4 (vuint32m4_t vd, vuint32m2_t vs2, size_t vl); +vuint32m8_t __riscv_vaesdf_vs_u32m2_u32m8 (vuint32m8_t vd, vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vaesdf_vv_u32m4 (vuint32m4_t vd, vuint32m4_t vs2, size_t vl); +vuint32m4_t __riscv_vaesdf_vs_u32m4_u32m4 (vuint32m4_t vd, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vaesdf_vs_u32m4_u32m8 (vuint32m8_t vd, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vaesdf_vv_u32m8 (vuint32m8_t vd, vuint32m8_t vs2, size_t vl); +vuint32mf2_t __riscv_vaesdm_vv_u32mf2 (vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); +vuint32mf2_t __riscv_vaesdm_vs_u32mf2_u32mf2 (vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vaesdm_vs_u32mf2_u32m1 (vuint32m1_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m2_t __riscv_vaesdm_vs_u32mf2_u32m2 (vuint32m2_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m4_t __riscv_vaesdm_vs_u32mf2_u32m4 (vuint32m4_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m8_t __riscv_vaesdm_vs_u32mf2_u32m8 (vuint32m8_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vaesdm_vv_u32m1 (vuint32m1_t vd, vuint32m1_t vs2, size_t vl); +vuint32m1_t __riscv_vaesdm_vs_u32m1_u32m1 (vuint32m1_t vd, vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vaesdm_vs_u32m1_u32m2 (vuint32m2_t vd, vuint32m1_t vs2, size_t vl); +vuint32m4_t __riscv_vaesdm_vs_u32m1_u32m4 (vuint32m4_t vd, vuint32m1_t vs2, size_t vl); +vuint32m8_t __riscv_vaesdm_vs_u32m1_u32m8 (vuint32m8_t vd, vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vaesdm_vv_u32m2 (vuint32m2_t vd, vuint32m2_t vs2, size_t vl); +vuint32m2_t __riscv_vaesdm_vs_u32m2_u32m2 (vuint32m2_t vd, vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vaesdm_vs_u32m2_u32m4 (vuint32m4_t vd, vuint32m2_t vs2, size_t vl); +vuint32m8_t __riscv_vaesdm_vs_u32m2_u32m8 (vuint32m8_t vd, vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vaesdm_vv_u32m4 (vuint32m4_t vd, vuint32m4_t 
vs2, size_t vl); +vuint32m4_t __riscv_vaesdm_vs_u32m4_u32m4 (vuint32m4_t vd, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vaesdm_vs_u32m4_u32m8 (vuint32m8_t vd, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vaesdm_vv_u32m8 (vuint32m8_t vd, vuint32m8_t vs2, size_t vl); +---- + +[[]] +==== Vector AES-128 Forward KeySchedule generation + +[,c] +---- +vuint32mf2_t __riscv_vaeskf1_vi_u32mf2 (vuint32mf2_t vs2, size_t uimm, size_t vl); +vuint32m1_t __riscv_vaeskf1_vi_u32m1 (vuint32m1_t vs2, size_t uimm, size_t vl); +vuint32m2_t __riscv_vaeskf1_vi_u32m2 (vuint32m2_t vs2, size_t uimm, size_t vl); +vuint32m4_t __riscv_vaeskf1_vi_u32m4 (vuint32m4_t vs2, size_t uimm, size_t vl); +vuint32m8_t __riscv_vaeskf1_vi_u32m8 (vuint32m8_t vs2, size_t uimm, size_t vl); +vuint32mf2_t __riscv_vaeskf2_vi_u32mf2 (vuint32mf2_t vd, vuint32mf2_t vs2, size_t uimm, size_t vl); +vuint32m1_t __riscv_vaeskf2_vi_u32m1 (vuint32m1_t vd, vuint32m1_t vs2, size_t uimm, size_t vl); +vuint32m2_t __riscv_vaeskf2_vi_u32m2 (vuint32m2_t vd, vuint32m2_t vs2, size_t uimm, size_t vl); +vuint32m4_t __riscv_vaeskf2_vi_u32m4 (vuint32m4_t vd, vuint32m4_t vs2, size_t uimm, size_t vl); +vuint32m8_t __riscv_vaeskf2_vi_u32m8 (vuint32m8_t vd, vuint32m8_t vs2, size_t uimm, size_t vl); +---- + +[[]] +==== Vector AES round zero + +[,c] +---- +vuint32mf2_t __riscv_vaesz_vs_u32mf2_u32mf2 (vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vaesz_vs_u32mf2_u32m1 (vuint32m1_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m2_t __riscv_vaesz_vs_u32mf2_u32m2 (vuint32m2_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m4_t __riscv_vaesz_vs_u32mf2_u32m4 (vuint32m4_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m8_t __riscv_vaesz_vs_u32mf2_u32m8 (vuint32m8_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vaesz_vs_u32m1_u32m1 (vuint32m1_t vd, vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vaesz_vs_u32m1_u32m2 (vuint32m2_t vd, vuint32m1_t vs2, size_t vl); +vuint32m4_t __riscv_vaesz_vs_u32m1_u32m4 (vuint32m4_t vd, vuint32m1_t vs2, size_t vl); +vuint32m8_t __riscv_vaesz_vs_u32m1_u32m8 (vuint32m8_t vd, vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vaesz_vs_u32m2_u32m2 (vuint32m2_t vd, vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vaesz_vs_u32m2_u32m4 (vuint32m4_t vd, vuint32m2_t vs2, size_t vl); +vuint32m8_t __riscv_vaesz_vs_u32m2_u32m8 (vuint32m8_t vd, vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vaesz_vs_u32m4_u32m4 (vuint32m4_t vd, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vaesz_vs_u32m4_u32m8 (vuint32m8_t vd, vuint32m4_t vs2, size_t vl); +---- diff --git a/auto-generated/vector-crypto/intrinsic_funcs/04_zvknh_-_nist_suite:_vector_sha-2_secure_hash.adoc b/auto-generated/vector-crypto/intrinsic_funcs/04_zvknh_-_nist_suite:_vector_sha-2_secure_hash.adoc new file mode 100644 index 000000000..6ce0c9cf6 --- /dev/null +++ b/auto-generated/vector-crypto/intrinsic_funcs/04_zvknh_-_nist_suite:_vector_sha-2_secure_hash.adoc @@ -0,0 +1,43 @@ + +=== Zvknh - NIST Suite: Vector SHA-2 Secure Hash + +[[]] +==== Vector SHA-2 message schedule + +[,c] +---- +vuint32mf2_t __riscv_vsha2ms_vv_u32mf2 (vuint32mf2_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl); +vuint32m1_t __riscv_vsha2ms_vv_u32m1 (vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl); +vuint32m2_t __riscv_vsha2ms_vv_u32m2 (vuint32m2_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl); +vuint32m4_t __riscv_vsha2ms_vv_u32m4 (vuint32m4_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl); +vuint32m8_t __riscv_vsha2ms_vv_u32m8 (vuint32m8_t vd, vuint32m8_t vs2, 
vuint32m8_t vs1, size_t vl); +vuint64m1_t __riscv_vsha2ms_vv_u64m1 (vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl); +vuint64m2_t __riscv_vsha2ms_vv_u64m2 (vuint64m2_t vd, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl); +vuint64m4_t __riscv_vsha2ms_vv_u64m4 (vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl); +vuint64m8_t __riscv_vsha2ms_vv_u64m8 (vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl); +---- + +[[]] +==== Vector SHA-2 two rounds of compression + +[,c] +---- +vuint32mf2_t __riscv_vsha2ch_vv_u32mf2 (vuint32mf2_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl); +vuint32m1_t __riscv_vsha2ch_vv_u32m1 (vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl); +vuint32m2_t __riscv_vsha2ch_vv_u32m2 (vuint32m2_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl); +vuint32m4_t __riscv_vsha2ch_vv_u32m4 (vuint32m4_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl); +vuint32m8_t __riscv_vsha2ch_vv_u32m8 (vuint32m8_t vd, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl); +vuint64m1_t __riscv_vsha2ch_vv_u64m1 (vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl); +vuint64m2_t __riscv_vsha2ch_vv_u64m2 (vuint64m2_t vd, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl); +vuint64m4_t __riscv_vsha2ch_vv_u64m4 (vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl); +vuint64m8_t __riscv_vsha2ch_vv_u64m8 (vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl); +vuint32mf2_t __riscv_vsha2cl_vv_u32mf2 (vuint32mf2_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl); +vuint32m1_t __riscv_vsha2cl_vv_u32m1 (vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl); +vuint32m2_t __riscv_vsha2cl_vv_u32m2 (vuint32m2_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl); +vuint32m4_t __riscv_vsha2cl_vv_u32m4 (vuint32m4_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl); +vuint32m8_t __riscv_vsha2cl_vv_u32m8 (vuint32m8_t vd, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl); +vuint64m1_t __riscv_vsha2cl_vv_u64m1 (vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl); +vuint64m2_t __riscv_vsha2cl_vv_u64m2 (vuint64m2_t vd, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl); +vuint64m4_t __riscv_vsha2cl_vv_u64m4 (vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl); +vuint64m8_t __riscv_vsha2cl_vv_u64m8 (vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl); +---- diff --git a/auto-generated/vector-crypto/intrinsic_funcs/05_zvksed_-_shangmi_suite:_sm4_block_cipher.adoc b/auto-generated/vector-crypto/intrinsic_funcs/05_zvksed_-_shangmi_suite:_sm4_block_cipher.adoc new file mode 100644 index 000000000..55a267250 --- /dev/null +++ b/auto-generated/vector-crypto/intrinsic_funcs/05_zvksed_-_shangmi_suite:_sm4_block_cipher.adoc @@ -0,0 +1,40 @@ + +=== Zvksed - ShangMi Suite: SM4 Block Cipher + +[[]] +==== Vector SM4 KeyExpansion + +[,c] +---- +vuint32mf2_t __riscv_vsm4k_vi_u32mf2 (vuint32mf2_t vs2, size_t uimm, size_t vl); +vuint32m1_t __riscv_vsm4k_vi_u32m1 (vuint32m1_t vs2, size_t uimm, size_t vl); +vuint32m2_t __riscv_vsm4k_vi_u32m2 (vuint32m2_t vs2, size_t uimm, size_t vl); +vuint32m4_t __riscv_vsm4k_vi_u32m4 (vuint32m4_t vs2, size_t uimm, size_t vl); +vuint32m8_t __riscv_vsm4k_vi_u32m8 (vuint32m8_t vs2, size_t uimm, size_t vl); +---- + +[[]] +==== Vector SM4 Rounds + +[,c] +---- +vuint32mf2_t __riscv_vsm4r_vv_u32mf2 (vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); +vuint32mf2_t __riscv_vsm4r_vs_u32mf2_u32mf2 (vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vsm4r_vs_u32mf2_u32m1 (vuint32m1_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m2_t 
__riscv_vsm4r_vs_u32mf2_u32m2 (vuint32m2_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m4_t __riscv_vsm4r_vs_u32mf2_u32m4 (vuint32m4_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m8_t __riscv_vsm4r_vs_u32mf2_u32m8 (vuint32m8_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vsm4r_vv_u32m1 (vuint32m1_t vd, vuint32m1_t vs2, size_t vl); +vuint32m1_t __riscv_vsm4r_vs_u32m1_u32m1 (vuint32m1_t vd, vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vsm4r_vs_u32m1_u32m2 (vuint32m2_t vd, vuint32m1_t vs2, size_t vl); +vuint32m4_t __riscv_vsm4r_vs_u32m1_u32m4 (vuint32m4_t vd, vuint32m1_t vs2, size_t vl); +vuint32m8_t __riscv_vsm4r_vs_u32m1_u32m8 (vuint32m8_t vd, vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vsm4r_vv_u32m2 (vuint32m2_t vd, vuint32m2_t vs2, size_t vl); +vuint32m2_t __riscv_vsm4r_vs_u32m2_u32m2 (vuint32m2_t vd, vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vsm4r_vs_u32m2_u32m4 (vuint32m4_t vd, vuint32m2_t vs2, size_t vl); +vuint32m8_t __riscv_vsm4r_vs_u32m2_u32m8 (vuint32m8_t vd, vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vsm4r_vv_u32m4 (vuint32m4_t vd, vuint32m4_t vs2, size_t vl); +vuint32m4_t __riscv_vsm4r_vs_u32m4_u32m4 (vuint32m4_t vd, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vsm4r_vs_u32m4_u32m8 (vuint32m8_t vd, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vsm4r_vv_u32m8 (vuint32m8_t vd, vuint32m8_t vs2, size_t vl); +---- diff --git a/auto-generated/vector-crypto/intrinsic_funcs/06_zvksh_-_shangmi_suite:_sm3_secure_hash.adoc b/auto-generated/vector-crypto/intrinsic_funcs/06_zvksh_-_shangmi_suite:_sm3_secure_hash.adoc new file mode 100644 index 000000000..a83f0b809 --- /dev/null +++ b/auto-generated/vector-crypto/intrinsic_funcs/06_zvksh_-_shangmi_suite:_sm3_secure_hash.adoc @@ -0,0 +1,26 @@ + +=== Zvksh - ShangMi Suite: SM3 Secure Hash + +[[]] +==== Vector SM3 Message Expansion + +[,c] +---- +vuint32mf2_t __riscv_vsm3me_vv_u32mf2 (vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl); +vuint32m1_t __riscv_vsm3me_vv_u32m1 (vuint32m1_t vs2, vuint32m1_t vs1, size_t vl); +vuint32m2_t __riscv_vsm3me_vv_u32m2 (vuint32m2_t vs2, vuint32m2_t vs1, size_t vl); +vuint32m4_t __riscv_vsm3me_vv_u32m4 (vuint32m4_t vs2, vuint32m4_t vs1, size_t vl); +vuint32m8_t __riscv_vsm3me_vv_u32m8 (vuint32m8_t vs2, vuint32m8_t vs1, size_t vl); +---- + +[[]] +==== Vector SM3 Compression + +[,c] +---- +vuint32mf2_t __riscv_vsm3c_vi_u32mf2 (vuint32mf2_t vd, vuint32mf2_t vs2, size_t uimm, size_t vl); +vuint32m1_t __riscv_vsm3c_vi_u32m1 (vuint32m1_t vd, vuint32m1_t vs2, size_t uimm, size_t vl); +vuint32m2_t __riscv_vsm3c_vi_u32m2 (vuint32m2_t vd, vuint32m2_t vs2, size_t uimm, size_t vl); +vuint32m4_t __riscv_vsm3c_vi_u32m4 (vuint32m4_t vd, vuint32m4_t vs2, size_t uimm, size_t vl); +vuint32m8_t __riscv_vsm3c_vi_u32m8 (vuint32m8_t vd, vuint32m8_t vs2, size_t uimm, size_t vl); +---- diff --git a/auto-generated/vector-crypto/llvm-api-tests/vaesdf.c b/auto-generated/vector-crypto/llvm-api-tests/vaesdf.c new file mode 100644 index 000000000..715c7881c --- /dev/null +++ b/auto-generated/vector-crypto/llvm-api-tests/vaesdf.c @@ -0,0 +1,104 @@ +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zvl512b \ +// RUN: -target-feature +zvbb \ +// RUN: -target-feature +zvbc \ +// RUN: -target-feature +zvkg \ +// RUN: -target-feature +zvkned \ +// RUN: -target-feature +zvknhb \ +// RUN: -target-feature +zvksed \ +// RUN: -target-feature +zvksh -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck 
--check-prefix=CHECK-RV64 %s + +#include + +vuint32mf2_t test_vaesdf_vv_u32mf2(vuint32mf2_t vd, vuint32mf2_t vs2, + size_t vl) { + return __riscv_vaesdf_vv_u32mf2(vd, vs2, vl); +} + +vuint32mf2_t test_vaesdf_vs_u32mf2_u32mf2(vuint32mf2_t vd, vuint32mf2_t vs2, + size_t vl) { + return __riscv_vaesdf_vs_u32mf2_u32mf2(vd, vs2, vl); +} + +vuint32m1_t test_vaesdf_vs_u32mf2_u32m1(vuint32m1_t vd, vuint32mf2_t vs2, + size_t vl) { + return __riscv_vaesdf_vs_u32mf2_u32m1(vd, vs2, vl); +} + +vuint32m2_t test_vaesdf_vs_u32mf2_u32m2(vuint32m2_t vd, vuint32mf2_t vs2, + size_t vl) { + return __riscv_vaesdf_vs_u32mf2_u32m2(vd, vs2, vl); +} + +vuint32m4_t test_vaesdf_vs_u32mf2_u32m4(vuint32m4_t vd, vuint32mf2_t vs2, + size_t vl) { + return __riscv_vaesdf_vs_u32mf2_u32m4(vd, vs2, vl); +} + +vuint32m8_t test_vaesdf_vs_u32mf2_u32m8(vuint32m8_t vd, vuint32mf2_t vs2, + size_t vl) { + return __riscv_vaesdf_vs_u32mf2_u32m8(vd, vs2, vl); +} + +vuint32m1_t test_vaesdf_vv_u32m1(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vaesdf_vv_u32m1(vd, vs2, vl); +} + +vuint32m1_t test_vaesdf_vs_u32m1_u32m1(vuint32m1_t vd, vuint32m1_t vs2, + size_t vl) { + return __riscv_vaesdf_vs_u32m1_u32m1(vd, vs2, vl); +} + +vuint32m2_t test_vaesdf_vs_u32m1_u32m2(vuint32m2_t vd, vuint32m1_t vs2, + size_t vl) { + return __riscv_vaesdf_vs_u32m1_u32m2(vd, vs2, vl); +} + +vuint32m4_t test_vaesdf_vs_u32m1_u32m4(vuint32m4_t vd, vuint32m1_t vs2, + size_t vl) { + return __riscv_vaesdf_vs_u32m1_u32m4(vd, vs2, vl); +} + +vuint32m8_t test_vaesdf_vs_u32m1_u32m8(vuint32m8_t vd, vuint32m1_t vs2, + size_t vl) { + return __riscv_vaesdf_vs_u32m1_u32m8(vd, vs2, vl); +} + +vuint32m2_t test_vaesdf_vv_u32m2(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vaesdf_vv_u32m2(vd, vs2, vl); +} + +vuint32m2_t test_vaesdf_vs_u32m2_u32m2(vuint32m2_t vd, vuint32m2_t vs2, + size_t vl) { + return __riscv_vaesdf_vs_u32m2_u32m2(vd, vs2, vl); +} + +vuint32m4_t test_vaesdf_vs_u32m2_u32m4(vuint32m4_t vd, vuint32m2_t vs2, + size_t vl) { + return __riscv_vaesdf_vs_u32m2_u32m4(vd, vs2, vl); +} + +vuint32m8_t test_vaesdf_vs_u32m2_u32m8(vuint32m8_t vd, vuint32m2_t vs2, + size_t vl) { + return __riscv_vaesdf_vs_u32m2_u32m8(vd, vs2, vl); +} + +vuint32m4_t test_vaesdf_vv_u32m4(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vaesdf_vv_u32m4(vd, vs2, vl); +} + +vuint32m4_t test_vaesdf_vs_u32m4_u32m4(vuint32m4_t vd, vuint32m4_t vs2, + size_t vl) { + return __riscv_vaesdf_vs_u32m4_u32m4(vd, vs2, vl); +} + +vuint32m8_t test_vaesdf_vs_u32m4_u32m8(vuint32m8_t vd, vuint32m4_t vs2, + size_t vl) { + return __riscv_vaesdf_vs_u32m4_u32m8(vd, vs2, vl); +} + +vuint32m8_t test_vaesdf_vv_u32m8(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { + return __riscv_vaesdf_vv_u32m8(vd, vs2, vl); +} diff --git a/auto-generated/vector-crypto/llvm-api-tests/vaesdm.c b/auto-generated/vector-crypto/llvm-api-tests/vaesdm.c new file mode 100644 index 000000000..c35b87b37 --- /dev/null +++ b/auto-generated/vector-crypto/llvm-api-tests/vaesdm.c @@ -0,0 +1,104 @@ +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zvl512b \ +// RUN: -target-feature +zvbb \ +// RUN: -target-feature +zvbc \ +// RUN: -target-feature +zvkg \ +// RUN: -target-feature +zvkned \ +// RUN: -target-feature +zvknhb \ +// RUN: -target-feature +zvksed \ +// RUN: -target-feature +zvksh -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include + +vuint32mf2_t 
test_vaesdm_vv_u32mf2(vuint32mf2_t vd, vuint32mf2_t vs2, + size_t vl) { + return __riscv_vaesdm_vv_u32mf2(vd, vs2, vl); +} + +vuint32mf2_t test_vaesdm_vs_u32mf2_u32mf2(vuint32mf2_t vd, vuint32mf2_t vs2, + size_t vl) { + return __riscv_vaesdm_vs_u32mf2_u32mf2(vd, vs2, vl); +} + +vuint32m1_t test_vaesdm_vs_u32mf2_u32m1(vuint32m1_t vd, vuint32mf2_t vs2, + size_t vl) { + return __riscv_vaesdm_vs_u32mf2_u32m1(vd, vs2, vl); +} + +vuint32m2_t test_vaesdm_vs_u32mf2_u32m2(vuint32m2_t vd, vuint32mf2_t vs2, + size_t vl) { + return __riscv_vaesdm_vs_u32mf2_u32m2(vd, vs2, vl); +} + +vuint32m4_t test_vaesdm_vs_u32mf2_u32m4(vuint32m4_t vd, vuint32mf2_t vs2, + size_t vl) { + return __riscv_vaesdm_vs_u32mf2_u32m4(vd, vs2, vl); +} + +vuint32m8_t test_vaesdm_vs_u32mf2_u32m8(vuint32m8_t vd, vuint32mf2_t vs2, + size_t vl) { + return __riscv_vaesdm_vs_u32mf2_u32m8(vd, vs2, vl); +} + +vuint32m1_t test_vaesdm_vv_u32m1(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vaesdm_vv_u32m1(vd, vs2, vl); +} + +vuint32m1_t test_vaesdm_vs_u32m1_u32m1(vuint32m1_t vd, vuint32m1_t vs2, + size_t vl) { + return __riscv_vaesdm_vs_u32m1_u32m1(vd, vs2, vl); +} + +vuint32m2_t test_vaesdm_vs_u32m1_u32m2(vuint32m2_t vd, vuint32m1_t vs2, + size_t vl) { + return __riscv_vaesdm_vs_u32m1_u32m2(vd, vs2, vl); +} + +vuint32m4_t test_vaesdm_vs_u32m1_u32m4(vuint32m4_t vd, vuint32m1_t vs2, + size_t vl) { + return __riscv_vaesdm_vs_u32m1_u32m4(vd, vs2, vl); +} + +vuint32m8_t test_vaesdm_vs_u32m1_u32m8(vuint32m8_t vd, vuint32m1_t vs2, + size_t vl) { + return __riscv_vaesdm_vs_u32m1_u32m8(vd, vs2, vl); +} + +vuint32m2_t test_vaesdm_vv_u32m2(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vaesdm_vv_u32m2(vd, vs2, vl); +} + +vuint32m2_t test_vaesdm_vs_u32m2_u32m2(vuint32m2_t vd, vuint32m2_t vs2, + size_t vl) { + return __riscv_vaesdm_vs_u32m2_u32m2(vd, vs2, vl); +} + +vuint32m4_t test_vaesdm_vs_u32m2_u32m4(vuint32m4_t vd, vuint32m2_t vs2, + size_t vl) { + return __riscv_vaesdm_vs_u32m2_u32m4(vd, vs2, vl); +} + +vuint32m8_t test_vaesdm_vs_u32m2_u32m8(vuint32m8_t vd, vuint32m2_t vs2, + size_t vl) { + return __riscv_vaesdm_vs_u32m2_u32m8(vd, vs2, vl); +} + +vuint32m4_t test_vaesdm_vv_u32m4(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vaesdm_vv_u32m4(vd, vs2, vl); +} + +vuint32m4_t test_vaesdm_vs_u32m4_u32m4(vuint32m4_t vd, vuint32m4_t vs2, + size_t vl) { + return __riscv_vaesdm_vs_u32m4_u32m4(vd, vs2, vl); +} + +vuint32m8_t test_vaesdm_vs_u32m4_u32m8(vuint32m8_t vd, vuint32m4_t vs2, + size_t vl) { + return __riscv_vaesdm_vs_u32m4_u32m8(vd, vs2, vl); +} + +vuint32m8_t test_vaesdm_vv_u32m8(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { + return __riscv_vaesdm_vv_u32m8(vd, vs2, vl); +} diff --git a/auto-generated/vector-crypto/llvm-api-tests/vaesef.c b/auto-generated/vector-crypto/llvm-api-tests/vaesef.c new file mode 100644 index 000000000..081cfe140 --- /dev/null +++ b/auto-generated/vector-crypto/llvm-api-tests/vaesef.c @@ -0,0 +1,104 @@ +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zvl512b \ +// RUN: -target-feature +zvbb \ +// RUN: -target-feature +zvbc \ +// RUN: -target-feature +zvkg \ +// RUN: -target-feature +zvkned \ +// RUN: -target-feature +zvknhb \ +// RUN: -target-feature +zvksed \ +// RUN: -target-feature +zvksh -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include + +vuint32mf2_t test_vaesef_vv_u32mf2(vuint32mf2_t vd, vuint32mf2_t vs2, + size_t vl) 
{ + return __riscv_vaesef_vv_u32mf2(vd, vs2, vl); +} + +vuint32mf2_t test_vaesef_vs_u32mf2_u32mf2(vuint32mf2_t vd, vuint32mf2_t vs2, + size_t vl) { + return __riscv_vaesef_vs_u32mf2_u32mf2(vd, vs2, vl); +} + +vuint32m1_t test_vaesef_vs_u32mf2_u32m1(vuint32m1_t vd, vuint32mf2_t vs2, + size_t vl) { + return __riscv_vaesef_vs_u32mf2_u32m1(vd, vs2, vl); +} + +vuint32m2_t test_vaesef_vs_u32mf2_u32m2(vuint32m2_t vd, vuint32mf2_t vs2, + size_t vl) { + return __riscv_vaesef_vs_u32mf2_u32m2(vd, vs2, vl); +} + +vuint32m4_t test_vaesef_vs_u32mf2_u32m4(vuint32m4_t vd, vuint32mf2_t vs2, + size_t vl) { + return __riscv_vaesef_vs_u32mf2_u32m4(vd, vs2, vl); +} + +vuint32m8_t test_vaesef_vs_u32mf2_u32m8(vuint32m8_t vd, vuint32mf2_t vs2, + size_t vl) { + return __riscv_vaesef_vs_u32mf2_u32m8(vd, vs2, vl); +} + +vuint32m1_t test_vaesef_vv_u32m1(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vaesef_vv_u32m1(vd, vs2, vl); +} + +vuint32m1_t test_vaesef_vs_u32m1_u32m1(vuint32m1_t vd, vuint32m1_t vs2, + size_t vl) { + return __riscv_vaesef_vs_u32m1_u32m1(vd, vs2, vl); +} + +vuint32m2_t test_vaesef_vs_u32m1_u32m2(vuint32m2_t vd, vuint32m1_t vs2, + size_t vl) { + return __riscv_vaesef_vs_u32m1_u32m2(vd, vs2, vl); +} + +vuint32m4_t test_vaesef_vs_u32m1_u32m4(vuint32m4_t vd, vuint32m1_t vs2, + size_t vl) { + return __riscv_vaesef_vs_u32m1_u32m4(vd, vs2, vl); +} + +vuint32m8_t test_vaesef_vs_u32m1_u32m8(vuint32m8_t vd, vuint32m1_t vs2, + size_t vl) { + return __riscv_vaesef_vs_u32m1_u32m8(vd, vs2, vl); +} + +vuint32m2_t test_vaesef_vv_u32m2(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vaesef_vv_u32m2(vd, vs2, vl); +} + +vuint32m2_t test_vaesef_vs_u32m2_u32m2(vuint32m2_t vd, vuint32m2_t vs2, + size_t vl) { + return __riscv_vaesef_vs_u32m2_u32m2(vd, vs2, vl); +} + +vuint32m4_t test_vaesef_vs_u32m2_u32m4(vuint32m4_t vd, vuint32m2_t vs2, + size_t vl) { + return __riscv_vaesef_vs_u32m2_u32m4(vd, vs2, vl); +} + +vuint32m8_t test_vaesef_vs_u32m2_u32m8(vuint32m8_t vd, vuint32m2_t vs2, + size_t vl) { + return __riscv_vaesef_vs_u32m2_u32m8(vd, vs2, vl); +} + +vuint32m4_t test_vaesef_vv_u32m4(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vaesef_vv_u32m4(vd, vs2, vl); +} + +vuint32m4_t test_vaesef_vs_u32m4_u32m4(vuint32m4_t vd, vuint32m4_t vs2, + size_t vl) { + return __riscv_vaesef_vs_u32m4_u32m4(vd, vs2, vl); +} + +vuint32m8_t test_vaesef_vs_u32m4_u32m8(vuint32m8_t vd, vuint32m4_t vs2, + size_t vl) { + return __riscv_vaesef_vs_u32m4_u32m8(vd, vs2, vl); +} + +vuint32m8_t test_vaesef_vv_u32m8(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { + return __riscv_vaesef_vv_u32m8(vd, vs2, vl); +} diff --git a/auto-generated/vector-crypto/llvm-api-tests/vaesem.c b/auto-generated/vector-crypto/llvm-api-tests/vaesem.c new file mode 100644 index 000000000..cf43774f1 --- /dev/null +++ b/auto-generated/vector-crypto/llvm-api-tests/vaesem.c @@ -0,0 +1,104 @@ +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zvl512b \ +// RUN: -target-feature +zvbb \ +// RUN: -target-feature +zvbc \ +// RUN: -target-feature +zvkg \ +// RUN: -target-feature +zvkned \ +// RUN: -target-feature +zvknhb \ +// RUN: -target-feature +zvksed \ +// RUN: -target-feature +zvksh -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include + +vuint32mf2_t test_vaesem_vv_u32mf2(vuint32mf2_t vd, vuint32mf2_t vs2, + size_t vl) { + return __riscv_vaesem_vv_u32mf2(vd, vs2, vl); +} + +vuint32mf2_t 
test_vaesem_vs_u32mf2_u32mf2(vuint32mf2_t vd, vuint32mf2_t vs2, + size_t vl) { + return __riscv_vaesem_vs_u32mf2_u32mf2(vd, vs2, vl); +} + +vuint32m1_t test_vaesem_vs_u32mf2_u32m1(vuint32m1_t vd, vuint32mf2_t vs2, + size_t vl) { + return __riscv_vaesem_vs_u32mf2_u32m1(vd, vs2, vl); +} + +vuint32m2_t test_vaesem_vs_u32mf2_u32m2(vuint32m2_t vd, vuint32mf2_t vs2, + size_t vl) { + return __riscv_vaesem_vs_u32mf2_u32m2(vd, vs2, vl); +} + +vuint32m4_t test_vaesem_vs_u32mf2_u32m4(vuint32m4_t vd, vuint32mf2_t vs2, + size_t vl) { + return __riscv_vaesem_vs_u32mf2_u32m4(vd, vs2, vl); +} + +vuint32m8_t test_vaesem_vs_u32mf2_u32m8(vuint32m8_t vd, vuint32mf2_t vs2, + size_t vl) { + return __riscv_vaesem_vs_u32mf2_u32m8(vd, vs2, vl); +} + +vuint32m1_t test_vaesem_vv_u32m1(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vaesem_vv_u32m1(vd, vs2, vl); +} + +vuint32m1_t test_vaesem_vs_u32m1_u32m1(vuint32m1_t vd, vuint32m1_t vs2, + size_t vl) { + return __riscv_vaesem_vs_u32m1_u32m1(vd, vs2, vl); +} + +vuint32m2_t test_vaesem_vs_u32m1_u32m2(vuint32m2_t vd, vuint32m1_t vs2, + size_t vl) { + return __riscv_vaesem_vs_u32m1_u32m2(vd, vs2, vl); +} + +vuint32m4_t test_vaesem_vs_u32m1_u32m4(vuint32m4_t vd, vuint32m1_t vs2, + size_t vl) { + return __riscv_vaesem_vs_u32m1_u32m4(vd, vs2, vl); +} + +vuint32m8_t test_vaesem_vs_u32m1_u32m8(vuint32m8_t vd, vuint32m1_t vs2, + size_t vl) { + return __riscv_vaesem_vs_u32m1_u32m8(vd, vs2, vl); +} + +vuint32m2_t test_vaesem_vv_u32m2(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vaesem_vv_u32m2(vd, vs2, vl); +} + +vuint32m2_t test_vaesem_vs_u32m2_u32m2(vuint32m2_t vd, vuint32m2_t vs2, + size_t vl) { + return __riscv_vaesem_vs_u32m2_u32m2(vd, vs2, vl); +} + +vuint32m4_t test_vaesem_vs_u32m2_u32m4(vuint32m4_t vd, vuint32m2_t vs2, + size_t vl) { + return __riscv_vaesem_vs_u32m2_u32m4(vd, vs2, vl); +} + +vuint32m8_t test_vaesem_vs_u32m2_u32m8(vuint32m8_t vd, vuint32m2_t vs2, + size_t vl) { + return __riscv_vaesem_vs_u32m2_u32m8(vd, vs2, vl); +} + +vuint32m4_t test_vaesem_vv_u32m4(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vaesem_vv_u32m4(vd, vs2, vl); +} + +vuint32m4_t test_vaesem_vs_u32m4_u32m4(vuint32m4_t vd, vuint32m4_t vs2, + size_t vl) { + return __riscv_vaesem_vs_u32m4_u32m4(vd, vs2, vl); +} + +vuint32m8_t test_vaesem_vs_u32m4_u32m8(vuint32m8_t vd, vuint32m4_t vs2, + size_t vl) { + return __riscv_vaesem_vs_u32m4_u32m8(vd, vs2, vl); +} + +vuint32m8_t test_vaesem_vv_u32m8(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { + return __riscv_vaesem_vv_u32m8(vd, vs2, vl); +} diff --git a/auto-generated/vector-crypto/llvm-api-tests/vaeskf1.c b/auto-generated/vector-crypto/llvm-api-tests/vaeskf1.c new file mode 100644 index 000000000..b92fbdead --- /dev/null +++ b/auto-generated/vector-crypto/llvm-api-tests/vaeskf1.c @@ -0,0 +1,33 @@ +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zvl512b \ +// RUN: -target-feature +zvbb \ +// RUN: -target-feature +zvbc \ +// RUN: -target-feature +zvkg \ +// RUN: -target-feature +zvkned \ +// RUN: -target-feature +zvknhb \ +// RUN: -target-feature +zvksed \ +// RUN: -target-feature +zvksh -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include + +vuint32mf2_t test_vaeskf1_vi_u32mf2(vuint32mf2_t vs2, size_t vl) { + return __riscv_vaeskf1_vi_u32mf2(vs2, 0, vl); +} + +vuint32m1_t test_vaeskf1_vi_u32m1(vuint32m1_t vs2, size_t vl) { + return 
__riscv_vaeskf1_vi_u32m1(vs2, 0, vl); +} + +vuint32m2_t test_vaeskf1_vi_u32m2(vuint32m2_t vs2, size_t vl) { + return __riscv_vaeskf1_vi_u32m2(vs2, 0, vl); +} + +vuint32m4_t test_vaeskf1_vi_u32m4(vuint32m4_t vs2, size_t vl) { + return __riscv_vaeskf1_vi_u32m4(vs2, 0, vl); +} + +vuint32m8_t test_vaeskf1_vi_u32m8(vuint32m8_t vs2, size_t vl) { + return __riscv_vaeskf1_vi_u32m8(vs2, 0, vl); +} diff --git a/auto-generated/vector-crypto/llvm-api-tests/vaeskf2.c b/auto-generated/vector-crypto/llvm-api-tests/vaeskf2.c new file mode 100644 index 000000000..aa796c5b2 --- /dev/null +++ b/auto-generated/vector-crypto/llvm-api-tests/vaeskf2.c @@ -0,0 +1,34 @@ +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zvl512b \ +// RUN: -target-feature +zvbb \ +// RUN: -target-feature +zvbc \ +// RUN: -target-feature +zvkg \ +// RUN: -target-feature +zvkned \ +// RUN: -target-feature +zvknhb \ +// RUN: -target-feature +zvksed \ +// RUN: -target-feature +zvksh -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include + +vuint32mf2_t test_vaeskf2_vi_u32mf2(vuint32mf2_t vd, vuint32mf2_t vs2, + size_t vl) { + return __riscv_vaeskf2_vi_u32mf2(vd, vs2, 0, vl); +} + +vuint32m1_t test_vaeskf2_vi_u32m1(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vaeskf2_vi_u32m1(vd, vs2, 0, vl); +} + +vuint32m2_t test_vaeskf2_vi_u32m2(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vaeskf2_vi_u32m2(vd, vs2, 0, vl); +} + +vuint32m4_t test_vaeskf2_vi_u32m4(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vaeskf2_vi_u32m4(vd, vs2, 0, vl); +} + +vuint32m8_t test_vaeskf2_vi_u32m8(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { + return __riscv_vaeskf2_vi_u32m8(vd, vs2, 0, vl); +} diff --git a/auto-generated/vector-crypto/llvm-api-tests/vaesz.c b/auto-generated/vector-crypto/llvm-api-tests/vaesz.c new file mode 100644 index 000000000..bdb19ece1 --- /dev/null +++ b/auto-generated/vector-crypto/llvm-api-tests/vaesz.c @@ -0,0 +1,83 @@ +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zvl512b \ +// RUN: -target-feature +zvbb \ +// RUN: -target-feature +zvbc \ +// RUN: -target-feature +zvkg \ +// RUN: -target-feature +zvkned \ +// RUN: -target-feature +zvknhb \ +// RUN: -target-feature +zvksed \ +// RUN: -target-feature +zvksh -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include + +vuint32mf2_t test_vaesz_vs_u32mf2_u32mf2(vuint32mf2_t vd, vuint32mf2_t vs2, + size_t vl) { + return __riscv_vaesz_vs_u32mf2_u32mf2(vd, vs2, vl); +} + +vuint32m1_t test_vaesz_vs_u32mf2_u32m1(vuint32m1_t vd, vuint32mf2_t vs2, + size_t vl) { + return __riscv_vaesz_vs_u32mf2_u32m1(vd, vs2, vl); +} + +vuint32m2_t test_vaesz_vs_u32mf2_u32m2(vuint32m2_t vd, vuint32mf2_t vs2, + size_t vl) { + return __riscv_vaesz_vs_u32mf2_u32m2(vd, vs2, vl); +} + +vuint32m4_t test_vaesz_vs_u32mf2_u32m4(vuint32m4_t vd, vuint32mf2_t vs2, + size_t vl) { + return __riscv_vaesz_vs_u32mf2_u32m4(vd, vs2, vl); +} + +vuint32m8_t test_vaesz_vs_u32mf2_u32m8(vuint32m8_t vd, vuint32mf2_t vs2, + size_t vl) { + return __riscv_vaesz_vs_u32mf2_u32m8(vd, vs2, vl); +} + +vuint32m1_t test_vaesz_vs_u32m1_u32m1(vuint32m1_t vd, vuint32m1_t vs2, + size_t vl) { + return __riscv_vaesz_vs_u32m1_u32m1(vd, vs2, vl); +} + +vuint32m2_t test_vaesz_vs_u32m1_u32m2(vuint32m2_t vd, vuint32m1_t vs2, + 
size_t vl) { + return __riscv_vaesz_vs_u32m1_u32m2(vd, vs2, vl); +} + +vuint32m4_t test_vaesz_vs_u32m1_u32m4(vuint32m4_t vd, vuint32m1_t vs2, + size_t vl) { + return __riscv_vaesz_vs_u32m1_u32m4(vd, vs2, vl); +} + +vuint32m8_t test_vaesz_vs_u32m1_u32m8(vuint32m8_t vd, vuint32m1_t vs2, + size_t vl) { + return __riscv_vaesz_vs_u32m1_u32m8(vd, vs2, vl); +} + +vuint32m2_t test_vaesz_vs_u32m2_u32m2(vuint32m2_t vd, vuint32m2_t vs2, + size_t vl) { + return __riscv_vaesz_vs_u32m2_u32m2(vd, vs2, vl); +} + +vuint32m4_t test_vaesz_vs_u32m2_u32m4(vuint32m4_t vd, vuint32m2_t vs2, + size_t vl) { + return __riscv_vaesz_vs_u32m2_u32m4(vd, vs2, vl); +} + +vuint32m8_t test_vaesz_vs_u32m2_u32m8(vuint32m8_t vd, vuint32m2_t vs2, + size_t vl) { + return __riscv_vaesz_vs_u32m2_u32m8(vd, vs2, vl); +} + +vuint32m4_t test_vaesz_vs_u32m4_u32m4(vuint32m4_t vd, vuint32m4_t vs2, + size_t vl) { + return __riscv_vaesz_vs_u32m4_u32m4(vd, vs2, vl); +} + +vuint32m8_t test_vaesz_vs_u32m4_u32m8(vuint32m8_t vd, vuint32m4_t vs2, + size_t vl) { + return __riscv_vaesz_vs_u32m4_u32m8(vd, vs2, vl); +} diff --git a/auto-generated/vector-crypto/llvm-api-tests/vandn.c b/auto-generated/vector-crypto/llvm-api-tests/vandn.c new file mode 100644 index 000000000..3f8f4c0a5 --- /dev/null +++ b/auto-generated/vector-crypto/llvm-api-tests/vandn.c @@ -0,0 +1,412 @@ +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zvl512b \ +// RUN: -target-feature +zvbb \ +// RUN: -target-feature +zvbc \ +// RUN: -target-feature +zvkg \ +// RUN: -target-feature +zvkned \ +// RUN: -target-feature +zvknhb \ +// RUN: -target-feature +zvksed \ +// RUN: -target-feature +zvksh -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include + +vuint8mf8_t test_vandn_vv_u8mf8(vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { + return __riscv_vandn_vv_u8mf8(vs2, vs1, vl); +} + +vuint8mf8_t test_vandn_vx_u8mf8(vuint8mf8_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vandn_vx_u8mf8(vs2, rs1, vl); +} + +vuint8mf4_t test_vandn_vv_u8mf4(vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { + return __riscv_vandn_vv_u8mf4(vs2, vs1, vl); +} + +vuint8mf4_t test_vandn_vx_u8mf4(vuint8mf4_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vandn_vx_u8mf4(vs2, rs1, vl); +} + +vuint8mf2_t test_vandn_vv_u8mf2(vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { + return __riscv_vandn_vv_u8mf2(vs2, vs1, vl); +} + +vuint8mf2_t test_vandn_vx_u8mf2(vuint8mf2_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vandn_vx_u8mf2(vs2, rs1, vl); +} + +vuint8m1_t test_vandn_vv_u8m1(vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vandn_vv_u8m1(vs2, vs1, vl); +} + +vuint8m1_t test_vandn_vx_u8m1(vuint8m1_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vandn_vx_u8m1(vs2, rs1, vl); +} + +vuint8m2_t test_vandn_vv_u8m2(vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) { + return __riscv_vandn_vv_u8m2(vs2, vs1, vl); +} + +vuint8m2_t test_vandn_vx_u8m2(vuint8m2_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vandn_vx_u8m2(vs2, rs1, vl); +} + +vuint8m4_t test_vandn_vv_u8m4(vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) { + return __riscv_vandn_vv_u8m4(vs2, vs1, vl); +} + +vuint8m4_t test_vandn_vx_u8m4(vuint8m4_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vandn_vx_u8m4(vs2, rs1, vl); +} + +vuint8m8_t test_vandn_vv_u8m8(vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) { + return __riscv_vandn_vv_u8m8(vs2, vs1, vl); +} + +vuint8m8_t test_vandn_vx_u8m8(vuint8m8_t vs2, uint8_t rs1, 
size_t vl) { + return __riscv_vandn_vx_u8m8(vs2, rs1, vl); +} + +vuint16mf4_t test_vandn_vv_u16mf4(vuint16mf4_t vs2, vuint16mf4_t vs1, + size_t vl) { + return __riscv_vandn_vv_u16mf4(vs2, vs1, vl); +} + +vuint16mf4_t test_vandn_vx_u16mf4(vuint16mf4_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vandn_vx_u16mf4(vs2, rs1, vl); +} + +vuint16mf2_t test_vandn_vv_u16mf2(vuint16mf2_t vs2, vuint16mf2_t vs1, + size_t vl) { + return __riscv_vandn_vv_u16mf2(vs2, vs1, vl); +} + +vuint16mf2_t test_vandn_vx_u16mf2(vuint16mf2_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vandn_vx_u16mf2(vs2, rs1, vl); +} + +vuint16m1_t test_vandn_vv_u16m1(vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vandn_vv_u16m1(vs2, vs1, vl); +} + +vuint16m1_t test_vandn_vx_u16m1(vuint16m1_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vandn_vx_u16m1(vs2, rs1, vl); +} + +vuint16m2_t test_vandn_vv_u16m2(vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { + return __riscv_vandn_vv_u16m2(vs2, vs1, vl); +} + +vuint16m2_t test_vandn_vx_u16m2(vuint16m2_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vandn_vx_u16m2(vs2, rs1, vl); +} + +vuint16m4_t test_vandn_vv_u16m4(vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { + return __riscv_vandn_vv_u16m4(vs2, vs1, vl); +} + +vuint16m4_t test_vandn_vx_u16m4(vuint16m4_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vandn_vx_u16m4(vs2, rs1, vl); +} + +vuint16m8_t test_vandn_vv_u16m8(vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) { + return __riscv_vandn_vv_u16m8(vs2, vs1, vl); +} + +vuint16m8_t test_vandn_vx_u16m8(vuint16m8_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vandn_vx_u16m8(vs2, rs1, vl); +} + +vuint32mf2_t test_vandn_vv_u32mf2(vuint32mf2_t vs2, vuint32mf2_t vs1, + size_t vl) { + return __riscv_vandn_vv_u32mf2(vs2, vs1, vl); +} + +vuint32mf2_t test_vandn_vx_u32mf2(vuint32mf2_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vandn_vx_u32mf2(vs2, rs1, vl); +} + +vuint32m1_t test_vandn_vv_u32m1(vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vandn_vv_u32m1(vs2, vs1, vl); +} + +vuint32m1_t test_vandn_vx_u32m1(vuint32m1_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vandn_vx_u32m1(vs2, rs1, vl); +} + +vuint32m2_t test_vandn_vv_u32m2(vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { + return __riscv_vandn_vv_u32m2(vs2, vs1, vl); +} + +vuint32m2_t test_vandn_vx_u32m2(vuint32m2_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vandn_vx_u32m2(vs2, rs1, vl); +} + +vuint32m4_t test_vandn_vv_u32m4(vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { + return __riscv_vandn_vv_u32m4(vs2, vs1, vl); +} + +vuint32m4_t test_vandn_vx_u32m4(vuint32m4_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vandn_vx_u32m4(vs2, rs1, vl); +} + +vuint32m8_t test_vandn_vv_u32m8(vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) { + return __riscv_vandn_vv_u32m8(vs2, vs1, vl); +} + +vuint32m8_t test_vandn_vx_u32m8(vuint32m8_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vandn_vx_u32m8(vs2, rs1, vl); +} + +vuint64m1_t test_vandn_vv_u64m1(vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vandn_vv_u64m1(vs2, vs1, vl); +} + +vuint64m1_t test_vandn_vx_u64m1(vuint64m1_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vandn_vx_u64m1(vs2, rs1, vl); +} + +vuint64m2_t test_vandn_vv_u64m2(vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { + return __riscv_vandn_vv_u64m2(vs2, vs1, vl); +} + +vuint64m2_t test_vandn_vx_u64m2(vuint64m2_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vandn_vx_u64m2(vs2, rs1, vl); +} + +vuint64m4_t test_vandn_vv_u64m4(vuint64m4_t vs2, vuint64m4_t 
vs1, size_t vl) { + return __riscv_vandn_vv_u64m4(vs2, vs1, vl); +} + +vuint64m4_t test_vandn_vx_u64m4(vuint64m4_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vandn_vx_u64m4(vs2, rs1, vl); +} + +vuint64m8_t test_vandn_vv_u64m8(vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { + return __riscv_vandn_vv_u64m8(vs2, vs1, vl); +} + +vuint64m8_t test_vandn_vx_u64m8(vuint64m8_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vandn_vx_u64m8(vs2, rs1, vl); +} + +vuint8mf8_t test_vandn_vv_u8mf8_m(vbool64_t vm, vuint8mf8_t vs2, + vuint8mf8_t vs1, size_t vl) { + return __riscv_vandn_vv_u8mf8_m(vm, vs2, vs1, vl); +} + +vuint8mf8_t test_vandn_vx_u8mf8_m(vbool64_t vm, vuint8mf8_t vs2, uint8_t rs1, + size_t vl) { + return __riscv_vandn_vx_u8mf8_m(vm, vs2, rs1, vl); +} + +vuint8mf4_t test_vandn_vv_u8mf4_m(vbool32_t vm, vuint8mf4_t vs2, + vuint8mf4_t vs1, size_t vl) { + return __riscv_vandn_vv_u8mf4_m(vm, vs2, vs1, vl); +} + +vuint8mf4_t test_vandn_vx_u8mf4_m(vbool32_t vm, vuint8mf4_t vs2, uint8_t rs1, + size_t vl) { + return __riscv_vandn_vx_u8mf4_m(vm, vs2, rs1, vl); +} + +vuint8mf2_t test_vandn_vv_u8mf2_m(vbool16_t vm, vuint8mf2_t vs2, + vuint8mf2_t vs1, size_t vl) { + return __riscv_vandn_vv_u8mf2_m(vm, vs2, vs1, vl); +} + +vuint8mf2_t test_vandn_vx_u8mf2_m(vbool16_t vm, vuint8mf2_t vs2, uint8_t rs1, + size_t vl) { + return __riscv_vandn_vx_u8mf2_m(vm, vs2, rs1, vl); +} + +vuint8m1_t test_vandn_vv_u8m1_m(vbool8_t vm, vuint8m1_t vs2, vuint8m1_t vs1, + size_t vl) { + return __riscv_vandn_vv_u8m1_m(vm, vs2, vs1, vl); +} + +vuint8m1_t test_vandn_vx_u8m1_m(vbool8_t vm, vuint8m1_t vs2, uint8_t rs1, + size_t vl) { + return __riscv_vandn_vx_u8m1_m(vm, vs2, rs1, vl); +} + +vuint8m2_t test_vandn_vv_u8m2_m(vbool4_t vm, vuint8m2_t vs2, vuint8m2_t vs1, + size_t vl) { + return __riscv_vandn_vv_u8m2_m(vm, vs2, vs1, vl); +} + +vuint8m2_t test_vandn_vx_u8m2_m(vbool4_t vm, vuint8m2_t vs2, uint8_t rs1, + size_t vl) { + return __riscv_vandn_vx_u8m2_m(vm, vs2, rs1, vl); +} + +vuint8m4_t test_vandn_vv_u8m4_m(vbool2_t vm, vuint8m4_t vs2, vuint8m4_t vs1, + size_t vl) { + return __riscv_vandn_vv_u8m4_m(vm, vs2, vs1, vl); +} + +vuint8m4_t test_vandn_vx_u8m4_m(vbool2_t vm, vuint8m4_t vs2, uint8_t rs1, + size_t vl) { + return __riscv_vandn_vx_u8m4_m(vm, vs2, rs1, vl); +} + +vuint8m8_t test_vandn_vv_u8m8_m(vbool1_t vm, vuint8m8_t vs2, vuint8m8_t vs1, + size_t vl) { + return __riscv_vandn_vv_u8m8_m(vm, vs2, vs1, vl); +} + +vuint8m8_t test_vandn_vx_u8m8_m(vbool1_t vm, vuint8m8_t vs2, uint8_t rs1, + size_t vl) { + return __riscv_vandn_vx_u8m8_m(vm, vs2, rs1, vl); +} + +vuint16mf4_t test_vandn_vv_u16mf4_m(vbool64_t vm, vuint16mf4_t vs2, + vuint16mf4_t vs1, size_t vl) { + return __riscv_vandn_vv_u16mf4_m(vm, vs2, vs1, vl); +} + +vuint16mf4_t test_vandn_vx_u16mf4_m(vbool64_t vm, vuint16mf4_t vs2, + uint16_t rs1, size_t vl) { + return __riscv_vandn_vx_u16mf4_m(vm, vs2, rs1, vl); +} + +vuint16mf2_t test_vandn_vv_u16mf2_m(vbool32_t vm, vuint16mf2_t vs2, + vuint16mf2_t vs1, size_t vl) { + return __riscv_vandn_vv_u16mf2_m(vm, vs2, vs1, vl); +} + +vuint16mf2_t test_vandn_vx_u16mf2_m(vbool32_t vm, vuint16mf2_t vs2, + uint16_t rs1, size_t vl) { + return __riscv_vandn_vx_u16mf2_m(vm, vs2, rs1, vl); +} + +vuint16m1_t test_vandn_vv_u16m1_m(vbool16_t vm, vuint16m1_t vs2, + vuint16m1_t vs1, size_t vl) { + return __riscv_vandn_vv_u16m1_m(vm, vs2, vs1, vl); +} + +vuint16m1_t test_vandn_vx_u16m1_m(vbool16_t vm, vuint16m1_t vs2, uint16_t rs1, + size_t vl) { + return __riscv_vandn_vx_u16m1_m(vm, vs2, rs1, vl); +} + +vuint16m2_t 
test_vandn_vv_u16m2_m(vbool8_t vm, vuint16m2_t vs2, vuint16m2_t vs1, + size_t vl) { + return __riscv_vandn_vv_u16m2_m(vm, vs2, vs1, vl); +} + +vuint16m2_t test_vandn_vx_u16m2_m(vbool8_t vm, vuint16m2_t vs2, uint16_t rs1, + size_t vl) { + return __riscv_vandn_vx_u16m2_m(vm, vs2, rs1, vl); +} + +vuint16m4_t test_vandn_vv_u16m4_m(vbool4_t vm, vuint16m4_t vs2, vuint16m4_t vs1, + size_t vl) { + return __riscv_vandn_vv_u16m4_m(vm, vs2, vs1, vl); +} + +vuint16m4_t test_vandn_vx_u16m4_m(vbool4_t vm, vuint16m4_t vs2, uint16_t rs1, + size_t vl) { + return __riscv_vandn_vx_u16m4_m(vm, vs2, rs1, vl); +} + +vuint16m8_t test_vandn_vv_u16m8_m(vbool2_t vm, vuint16m8_t vs2, vuint16m8_t vs1, + size_t vl) { + return __riscv_vandn_vv_u16m8_m(vm, vs2, vs1, vl); +} + +vuint16m8_t test_vandn_vx_u16m8_m(vbool2_t vm, vuint16m8_t vs2, uint16_t rs1, + size_t vl) { + return __riscv_vandn_vx_u16m8_m(vm, vs2, rs1, vl); +} + +vuint32mf2_t test_vandn_vv_u32mf2_m(vbool64_t vm, vuint32mf2_t vs2, + vuint32mf2_t vs1, size_t vl) { + return __riscv_vandn_vv_u32mf2_m(vm, vs2, vs1, vl); +} + +vuint32mf2_t test_vandn_vx_u32mf2_m(vbool64_t vm, vuint32mf2_t vs2, + uint32_t rs1, size_t vl) { + return __riscv_vandn_vx_u32mf2_m(vm, vs2, rs1, vl); +} + +vuint32m1_t test_vandn_vv_u32m1_m(vbool32_t vm, vuint32m1_t vs2, + vuint32m1_t vs1, size_t vl) { + return __riscv_vandn_vv_u32m1_m(vm, vs2, vs1, vl); +} + +vuint32m1_t test_vandn_vx_u32m1_m(vbool32_t vm, vuint32m1_t vs2, uint32_t rs1, + size_t vl) { + return __riscv_vandn_vx_u32m1_m(vm, vs2, rs1, vl); +} + +vuint32m2_t test_vandn_vv_u32m2_m(vbool16_t vm, vuint32m2_t vs2, + vuint32m2_t vs1, size_t vl) { + return __riscv_vandn_vv_u32m2_m(vm, vs2, vs1, vl); +} + +vuint32m2_t test_vandn_vx_u32m2_m(vbool16_t vm, vuint32m2_t vs2, uint32_t rs1, + size_t vl) { + return __riscv_vandn_vx_u32m2_m(vm, vs2, rs1, vl); +} + +vuint32m4_t test_vandn_vv_u32m4_m(vbool8_t vm, vuint32m4_t vs2, vuint32m4_t vs1, + size_t vl) { + return __riscv_vandn_vv_u32m4_m(vm, vs2, vs1, vl); +} + +vuint32m4_t test_vandn_vx_u32m4_m(vbool8_t vm, vuint32m4_t vs2, uint32_t rs1, + size_t vl) { + return __riscv_vandn_vx_u32m4_m(vm, vs2, rs1, vl); +} + +vuint32m8_t test_vandn_vv_u32m8_m(vbool4_t vm, vuint32m8_t vs2, vuint32m8_t vs1, + size_t vl) { + return __riscv_vandn_vv_u32m8_m(vm, vs2, vs1, vl); +} + +vuint32m8_t test_vandn_vx_u32m8_m(vbool4_t vm, vuint32m8_t vs2, uint32_t rs1, + size_t vl) { + return __riscv_vandn_vx_u32m8_m(vm, vs2, rs1, vl); +} + +vuint64m1_t test_vandn_vv_u64m1_m(vbool64_t vm, vuint64m1_t vs2, + vuint64m1_t vs1, size_t vl) { + return __riscv_vandn_vv_u64m1_m(vm, vs2, vs1, vl); +} + +vuint64m1_t test_vandn_vx_u64m1_m(vbool64_t vm, vuint64m1_t vs2, uint64_t rs1, + size_t vl) { + return __riscv_vandn_vx_u64m1_m(vm, vs2, rs1, vl); +} + +vuint64m2_t test_vandn_vv_u64m2_m(vbool32_t vm, vuint64m2_t vs2, + vuint64m2_t vs1, size_t vl) { + return __riscv_vandn_vv_u64m2_m(vm, vs2, vs1, vl); +} + +vuint64m2_t test_vandn_vx_u64m2_m(vbool32_t vm, vuint64m2_t vs2, uint64_t rs1, + size_t vl) { + return __riscv_vandn_vx_u64m2_m(vm, vs2, rs1, vl); +} + +vuint64m4_t test_vandn_vv_u64m4_m(vbool16_t vm, vuint64m4_t vs2, + vuint64m4_t vs1, size_t vl) { + return __riscv_vandn_vv_u64m4_m(vm, vs2, vs1, vl); +} + +vuint64m4_t test_vandn_vx_u64m4_m(vbool16_t vm, vuint64m4_t vs2, uint64_t rs1, + size_t vl) { + return __riscv_vandn_vx_u64m4_m(vm, vs2, rs1, vl); +} + +vuint64m8_t test_vandn_vv_u64m8_m(vbool8_t vm, vuint64m8_t vs2, vuint64m8_t vs1, + size_t vl) { + return __riscv_vandn_vv_u64m8_m(vm, vs2, vs1, vl); +} + +vuint64m8_t 
test_vandn_vx_u64m8_m(vbool8_t vm, vuint64m8_t vs2, uint64_t rs1, + size_t vl) { + return __riscv_vandn_vx_u64m8_m(vm, vs2, rs1, vl); +} diff --git a/auto-generated/vector-crypto/llvm-api-tests/vbrev.c b/auto-generated/vector-crypto/llvm-api-tests/vbrev.c new file mode 100644 index 000000000..602551b22 --- /dev/null +++ b/auto-generated/vector-crypto/llvm-api-tests/vbrev.c @@ -0,0 +1,189 @@ +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zvl512b \ +// RUN: -target-feature +zvbb \ +// RUN: -target-feature +zvbc \ +// RUN: -target-feature +zvkg \ +// RUN: -target-feature +zvkned \ +// RUN: -target-feature +zvknhb \ +// RUN: -target-feature +zvksed \ +// RUN: -target-feature +zvksh -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include + +vuint8mf8_t test_vbrev_v_u8mf8(vuint8mf8_t vs2, size_t vl) { + return __riscv_vbrev_v_u8mf8(vs2, vl); +} + +vuint8mf4_t test_vbrev_v_u8mf4(vuint8mf4_t vs2, size_t vl) { + return __riscv_vbrev_v_u8mf4(vs2, vl); +} + +vuint8mf2_t test_vbrev_v_u8mf2(vuint8mf2_t vs2, size_t vl) { + return __riscv_vbrev_v_u8mf2(vs2, vl); +} + +vuint8m1_t test_vbrev_v_u8m1(vuint8m1_t vs2, size_t vl) { + return __riscv_vbrev_v_u8m1(vs2, vl); +} + +vuint8m2_t test_vbrev_v_u8m2(vuint8m2_t vs2, size_t vl) { + return __riscv_vbrev_v_u8m2(vs2, vl); +} + +vuint8m4_t test_vbrev_v_u8m4(vuint8m4_t vs2, size_t vl) { + return __riscv_vbrev_v_u8m4(vs2, vl); +} + +vuint8m8_t test_vbrev_v_u8m8(vuint8m8_t vs2, size_t vl) { + return __riscv_vbrev_v_u8m8(vs2, vl); +} + +vuint16mf4_t test_vbrev_v_u16mf4(vuint16mf4_t vs2, size_t vl) { + return __riscv_vbrev_v_u16mf4(vs2, vl); +} + +vuint16mf2_t test_vbrev_v_u16mf2(vuint16mf2_t vs2, size_t vl) { + return __riscv_vbrev_v_u16mf2(vs2, vl); +} + +vuint16m1_t test_vbrev_v_u16m1(vuint16m1_t vs2, size_t vl) { + return __riscv_vbrev_v_u16m1(vs2, vl); +} + +vuint16m2_t test_vbrev_v_u16m2(vuint16m2_t vs2, size_t vl) { + return __riscv_vbrev_v_u16m2(vs2, vl); +} + +vuint16m4_t test_vbrev_v_u16m4(vuint16m4_t vs2, size_t vl) { + return __riscv_vbrev_v_u16m4(vs2, vl); +} + +vuint16m8_t test_vbrev_v_u16m8(vuint16m8_t vs2, size_t vl) { + return __riscv_vbrev_v_u16m8(vs2, vl); +} + +vuint32mf2_t test_vbrev_v_u32mf2(vuint32mf2_t vs2, size_t vl) { + return __riscv_vbrev_v_u32mf2(vs2, vl); +} + +vuint32m1_t test_vbrev_v_u32m1(vuint32m1_t vs2, size_t vl) { + return __riscv_vbrev_v_u32m1(vs2, vl); +} + +vuint32m2_t test_vbrev_v_u32m2(vuint32m2_t vs2, size_t vl) { + return __riscv_vbrev_v_u32m2(vs2, vl); +} + +vuint32m4_t test_vbrev_v_u32m4(vuint32m4_t vs2, size_t vl) { + return __riscv_vbrev_v_u32m4(vs2, vl); +} + +vuint32m8_t test_vbrev_v_u32m8(vuint32m8_t vs2, size_t vl) { + return __riscv_vbrev_v_u32m8(vs2, vl); +} + +vuint64m1_t test_vbrev_v_u64m1(vuint64m1_t vs2, size_t vl) { + return __riscv_vbrev_v_u64m1(vs2, vl); +} + +vuint64m2_t test_vbrev_v_u64m2(vuint64m2_t vs2, size_t vl) { + return __riscv_vbrev_v_u64m2(vs2, vl); +} + +vuint64m4_t test_vbrev_v_u64m4(vuint64m4_t vs2, size_t vl) { + return __riscv_vbrev_v_u64m4(vs2, vl); +} + +vuint64m8_t test_vbrev_v_u64m8(vuint64m8_t vs2, size_t vl) { + return __riscv_vbrev_v_u64m8(vs2, vl); +} + +vuint8mf8_t test_vbrev_v_u8mf8_m(vbool64_t vm, vuint8mf8_t vs2, size_t vl) { + return __riscv_vbrev_v_u8mf8_m(vm, vs2, vl); +} + +vuint8mf4_t test_vbrev_v_u8mf4_m(vbool32_t vm, vuint8mf4_t vs2, size_t vl) { + return __riscv_vbrev_v_u8mf4_m(vm, vs2, vl); +} + +vuint8mf2_t 
test_vbrev_v_u8mf2_m(vbool16_t vm, vuint8mf2_t vs2, size_t vl) { + return __riscv_vbrev_v_u8mf2_m(vm, vs2, vl); +} + +vuint8m1_t test_vbrev_v_u8m1_m(vbool8_t vm, vuint8m1_t vs2, size_t vl) { + return __riscv_vbrev_v_u8m1_m(vm, vs2, vl); +} + +vuint8m2_t test_vbrev_v_u8m2_m(vbool4_t vm, vuint8m2_t vs2, size_t vl) { + return __riscv_vbrev_v_u8m2_m(vm, vs2, vl); +} + +vuint8m4_t test_vbrev_v_u8m4_m(vbool2_t vm, vuint8m4_t vs2, size_t vl) { + return __riscv_vbrev_v_u8m4_m(vm, vs2, vl); +} + +vuint8m8_t test_vbrev_v_u8m8_m(vbool1_t vm, vuint8m8_t vs2, size_t vl) { + return __riscv_vbrev_v_u8m8_m(vm, vs2, vl); +} + +vuint16mf4_t test_vbrev_v_u16mf4_m(vbool64_t vm, vuint16mf4_t vs2, size_t vl) { + return __riscv_vbrev_v_u16mf4_m(vm, vs2, vl); +} + +vuint16mf2_t test_vbrev_v_u16mf2_m(vbool32_t vm, vuint16mf2_t vs2, size_t vl) { + return __riscv_vbrev_v_u16mf2_m(vm, vs2, vl); +} + +vuint16m1_t test_vbrev_v_u16m1_m(vbool16_t vm, vuint16m1_t vs2, size_t vl) { + return __riscv_vbrev_v_u16m1_m(vm, vs2, vl); +} + +vuint16m2_t test_vbrev_v_u16m2_m(vbool8_t vm, vuint16m2_t vs2, size_t vl) { + return __riscv_vbrev_v_u16m2_m(vm, vs2, vl); +} + +vuint16m4_t test_vbrev_v_u16m4_m(vbool4_t vm, vuint16m4_t vs2, size_t vl) { + return __riscv_vbrev_v_u16m4_m(vm, vs2, vl); +} + +vuint16m8_t test_vbrev_v_u16m8_m(vbool2_t vm, vuint16m8_t vs2, size_t vl) { + return __riscv_vbrev_v_u16m8_m(vm, vs2, vl); +} + +vuint32mf2_t test_vbrev_v_u32mf2_m(vbool64_t vm, vuint32mf2_t vs2, size_t vl) { + return __riscv_vbrev_v_u32mf2_m(vm, vs2, vl); +} + +vuint32m1_t test_vbrev_v_u32m1_m(vbool32_t vm, vuint32m1_t vs2, size_t vl) { + return __riscv_vbrev_v_u32m1_m(vm, vs2, vl); +} + +vuint32m2_t test_vbrev_v_u32m2_m(vbool16_t vm, vuint32m2_t vs2, size_t vl) { + return __riscv_vbrev_v_u32m2_m(vm, vs2, vl); +} + +vuint32m4_t test_vbrev_v_u32m4_m(vbool8_t vm, vuint32m4_t vs2, size_t vl) { + return __riscv_vbrev_v_u32m4_m(vm, vs2, vl); +} + +vuint32m8_t test_vbrev_v_u32m8_m(vbool4_t vm, vuint32m8_t vs2, size_t vl) { + return __riscv_vbrev_v_u32m8_m(vm, vs2, vl); +} + +vuint64m1_t test_vbrev_v_u64m1_m(vbool64_t vm, vuint64m1_t vs2, size_t vl) { + return __riscv_vbrev_v_u64m1_m(vm, vs2, vl); +} + +vuint64m2_t test_vbrev_v_u64m2_m(vbool32_t vm, vuint64m2_t vs2, size_t vl) { + return __riscv_vbrev_v_u64m2_m(vm, vs2, vl); +} + +vuint64m4_t test_vbrev_v_u64m4_m(vbool16_t vm, vuint64m4_t vs2, size_t vl) { + return __riscv_vbrev_v_u64m4_m(vm, vs2, vl); +} + +vuint64m8_t test_vbrev_v_u64m8_m(vbool8_t vm, vuint64m8_t vs2, size_t vl) { + return __riscv_vbrev_v_u64m8_m(vm, vs2, vl); +} diff --git a/auto-generated/vector-crypto/llvm-api-tests/vbrev8.c b/auto-generated/vector-crypto/llvm-api-tests/vbrev8.c new file mode 100644 index 000000000..dbb64b45e --- /dev/null +++ b/auto-generated/vector-crypto/llvm-api-tests/vbrev8.c @@ -0,0 +1,189 @@ +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zvl512b \ +// RUN: -target-feature +zvbb \ +// RUN: -target-feature +zvbc \ +// RUN: -target-feature +zvkg \ +// RUN: -target-feature +zvkned \ +// RUN: -target-feature +zvknhb \ +// RUN: -target-feature +zvksed \ +// RUN: -target-feature +zvksh -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include + +vuint8mf8_t test_vbrev8_v_u8mf8(vuint8mf8_t vs2, size_t vl) { + return __riscv_vbrev8_v_u8mf8(vs2, vl); +} + +vuint8mf4_t test_vbrev8_v_u8mf4(vuint8mf4_t vs2, size_t vl) { + return __riscv_vbrev8_v_u8mf4(vs2, vl); +} + 
+vuint8mf2_t test_vbrev8_v_u8mf2(vuint8mf2_t vs2, size_t vl) { + return __riscv_vbrev8_v_u8mf2(vs2, vl); +} + +vuint8m1_t test_vbrev8_v_u8m1(vuint8m1_t vs2, size_t vl) { + return __riscv_vbrev8_v_u8m1(vs2, vl); +} + +vuint8m2_t test_vbrev8_v_u8m2(vuint8m2_t vs2, size_t vl) { + return __riscv_vbrev8_v_u8m2(vs2, vl); +} + +vuint8m4_t test_vbrev8_v_u8m4(vuint8m4_t vs2, size_t vl) { + return __riscv_vbrev8_v_u8m4(vs2, vl); +} + +vuint8m8_t test_vbrev8_v_u8m8(vuint8m8_t vs2, size_t vl) { + return __riscv_vbrev8_v_u8m8(vs2, vl); +} + +vuint16mf4_t test_vbrev8_v_u16mf4(vuint16mf4_t vs2, size_t vl) { + return __riscv_vbrev8_v_u16mf4(vs2, vl); +} + +vuint16mf2_t test_vbrev8_v_u16mf2(vuint16mf2_t vs2, size_t vl) { + return __riscv_vbrev8_v_u16mf2(vs2, vl); +} + +vuint16m1_t test_vbrev8_v_u16m1(vuint16m1_t vs2, size_t vl) { + return __riscv_vbrev8_v_u16m1(vs2, vl); +} + +vuint16m2_t test_vbrev8_v_u16m2(vuint16m2_t vs2, size_t vl) { + return __riscv_vbrev8_v_u16m2(vs2, vl); +} + +vuint16m4_t test_vbrev8_v_u16m4(vuint16m4_t vs2, size_t vl) { + return __riscv_vbrev8_v_u16m4(vs2, vl); +} + +vuint16m8_t test_vbrev8_v_u16m8(vuint16m8_t vs2, size_t vl) { + return __riscv_vbrev8_v_u16m8(vs2, vl); +} + +vuint32mf2_t test_vbrev8_v_u32mf2(vuint32mf2_t vs2, size_t vl) { + return __riscv_vbrev8_v_u32mf2(vs2, vl); +} + +vuint32m1_t test_vbrev8_v_u32m1(vuint32m1_t vs2, size_t vl) { + return __riscv_vbrev8_v_u32m1(vs2, vl); +} + +vuint32m2_t test_vbrev8_v_u32m2(vuint32m2_t vs2, size_t vl) { + return __riscv_vbrev8_v_u32m2(vs2, vl); +} + +vuint32m4_t test_vbrev8_v_u32m4(vuint32m4_t vs2, size_t vl) { + return __riscv_vbrev8_v_u32m4(vs2, vl); +} + +vuint32m8_t test_vbrev8_v_u32m8(vuint32m8_t vs2, size_t vl) { + return __riscv_vbrev8_v_u32m8(vs2, vl); +} + +vuint64m1_t test_vbrev8_v_u64m1(vuint64m1_t vs2, size_t vl) { + return __riscv_vbrev8_v_u64m1(vs2, vl); +} + +vuint64m2_t test_vbrev8_v_u64m2(vuint64m2_t vs2, size_t vl) { + return __riscv_vbrev8_v_u64m2(vs2, vl); +} + +vuint64m4_t test_vbrev8_v_u64m4(vuint64m4_t vs2, size_t vl) { + return __riscv_vbrev8_v_u64m4(vs2, vl); +} + +vuint64m8_t test_vbrev8_v_u64m8(vuint64m8_t vs2, size_t vl) { + return __riscv_vbrev8_v_u64m8(vs2, vl); +} + +vuint8mf8_t test_vbrev8_v_u8mf8_m(vbool64_t vm, vuint8mf8_t vs2, size_t vl) { + return __riscv_vbrev8_v_u8mf8_m(vm, vs2, vl); +} + +vuint8mf4_t test_vbrev8_v_u8mf4_m(vbool32_t vm, vuint8mf4_t vs2, size_t vl) { + return __riscv_vbrev8_v_u8mf4_m(vm, vs2, vl); +} + +vuint8mf2_t test_vbrev8_v_u8mf2_m(vbool16_t vm, vuint8mf2_t vs2, size_t vl) { + return __riscv_vbrev8_v_u8mf2_m(vm, vs2, vl); +} + +vuint8m1_t test_vbrev8_v_u8m1_m(vbool8_t vm, vuint8m1_t vs2, size_t vl) { + return __riscv_vbrev8_v_u8m1_m(vm, vs2, vl); +} + +vuint8m2_t test_vbrev8_v_u8m2_m(vbool4_t vm, vuint8m2_t vs2, size_t vl) { + return __riscv_vbrev8_v_u8m2_m(vm, vs2, vl); +} + +vuint8m4_t test_vbrev8_v_u8m4_m(vbool2_t vm, vuint8m4_t vs2, size_t vl) { + return __riscv_vbrev8_v_u8m4_m(vm, vs2, vl); +} + +vuint8m8_t test_vbrev8_v_u8m8_m(vbool1_t vm, vuint8m8_t vs2, size_t vl) { + return __riscv_vbrev8_v_u8m8_m(vm, vs2, vl); +} + +vuint16mf4_t test_vbrev8_v_u16mf4_m(vbool64_t vm, vuint16mf4_t vs2, size_t vl) { + return __riscv_vbrev8_v_u16mf4_m(vm, vs2, vl); +} + +vuint16mf2_t test_vbrev8_v_u16mf2_m(vbool32_t vm, vuint16mf2_t vs2, size_t vl) { + return __riscv_vbrev8_v_u16mf2_m(vm, vs2, vl); +} + +vuint16m1_t test_vbrev8_v_u16m1_m(vbool16_t vm, vuint16m1_t vs2, size_t vl) { + return __riscv_vbrev8_v_u16m1_m(vm, vs2, vl); +} + +vuint16m2_t test_vbrev8_v_u16m2_m(vbool8_t vm, 
vuint16m2_t vs2, size_t vl) { + return __riscv_vbrev8_v_u16m2_m(vm, vs2, vl); +} + +vuint16m4_t test_vbrev8_v_u16m4_m(vbool4_t vm, vuint16m4_t vs2, size_t vl) { + return __riscv_vbrev8_v_u16m4_m(vm, vs2, vl); +} + +vuint16m8_t test_vbrev8_v_u16m8_m(vbool2_t vm, vuint16m8_t vs2, size_t vl) { + return __riscv_vbrev8_v_u16m8_m(vm, vs2, vl); +} + +vuint32mf2_t test_vbrev8_v_u32mf2_m(vbool64_t vm, vuint32mf2_t vs2, size_t vl) { + return __riscv_vbrev8_v_u32mf2_m(vm, vs2, vl); +} + +vuint32m1_t test_vbrev8_v_u32m1_m(vbool32_t vm, vuint32m1_t vs2, size_t vl) { + return __riscv_vbrev8_v_u32m1_m(vm, vs2, vl); +} + +vuint32m2_t test_vbrev8_v_u32m2_m(vbool16_t vm, vuint32m2_t vs2, size_t vl) { + return __riscv_vbrev8_v_u32m2_m(vm, vs2, vl); +} + +vuint32m4_t test_vbrev8_v_u32m4_m(vbool8_t vm, vuint32m4_t vs2, size_t vl) { + return __riscv_vbrev8_v_u32m4_m(vm, vs2, vl); +} + +vuint32m8_t test_vbrev8_v_u32m8_m(vbool4_t vm, vuint32m8_t vs2, size_t vl) { + return __riscv_vbrev8_v_u32m8_m(vm, vs2, vl); +} + +vuint64m1_t test_vbrev8_v_u64m1_m(vbool64_t vm, vuint64m1_t vs2, size_t vl) { + return __riscv_vbrev8_v_u64m1_m(vm, vs2, vl); +} + +vuint64m2_t test_vbrev8_v_u64m2_m(vbool32_t vm, vuint64m2_t vs2, size_t vl) { + return __riscv_vbrev8_v_u64m2_m(vm, vs2, vl); +} + +vuint64m4_t test_vbrev8_v_u64m4_m(vbool16_t vm, vuint64m4_t vs2, size_t vl) { + return __riscv_vbrev8_v_u64m4_m(vm, vs2, vl); +} + +vuint64m8_t test_vbrev8_v_u64m8_m(vbool8_t vm, vuint64m8_t vs2, size_t vl) { + return __riscv_vbrev8_v_u64m8_m(vm, vs2, vl); +} diff --git a/auto-generated/vector-crypto/llvm-api-tests/vclmul.c b/auto-generated/vector-crypto/llvm-api-tests/vclmul.c new file mode 100644 index 000000000..d6697a372 --- /dev/null +++ b/auto-generated/vector-crypto/llvm-api-tests/vclmul.c @@ -0,0 +1,85 @@ +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zvl512b \ +// RUN: -target-feature +zvbb \ +// RUN: -target-feature +zvbc \ +// RUN: -target-feature +zvkg \ +// RUN: -target-feature +zvkned \ +// RUN: -target-feature +zvknhb \ +// RUN: -target-feature +zvksed \ +// RUN: -target-feature +zvksh -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include + +vuint64m1_t test_vclmul_vv_u64m1(vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vclmul_vv_u64m1(vs2, vs1, vl); +} + +vuint64m1_t test_vclmul_vx_u64m1(vuint64m1_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vclmul_vx_u64m1(vs2, rs1, vl); +} + +vuint64m2_t test_vclmul_vv_u64m2(vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { + return __riscv_vclmul_vv_u64m2(vs2, vs1, vl); +} + +vuint64m2_t test_vclmul_vx_u64m2(vuint64m2_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vclmul_vx_u64m2(vs2, rs1, vl); +} + +vuint64m4_t test_vclmul_vv_u64m4(vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { + return __riscv_vclmul_vv_u64m4(vs2, vs1, vl); +} + +vuint64m4_t test_vclmul_vx_u64m4(vuint64m4_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vclmul_vx_u64m4(vs2, rs1, vl); +} + +vuint64m8_t test_vclmul_vv_u64m8(vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { + return __riscv_vclmul_vv_u64m8(vs2, vs1, vl); +} + +vuint64m8_t test_vclmul_vx_u64m8(vuint64m8_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vclmul_vx_u64m8(vs2, rs1, vl); +} + +vuint64m1_t test_vclmul_vv_u64m1_m(vbool64_t vm, vuint64m1_t vs2, + vuint64m1_t vs1, size_t vl) { + return __riscv_vclmul_vv_u64m1_m(vm, vs2, vs1, vl); +} + +vuint64m1_t test_vclmul_vx_u64m1_m(vbool64_t 
vm, vuint64m1_t vs2, uint64_t rs1, + size_t vl) { + return __riscv_vclmul_vx_u64m1_m(vm, vs2, rs1, vl); +} + +vuint64m2_t test_vclmul_vv_u64m2_m(vbool32_t vm, vuint64m2_t vs2, + vuint64m2_t vs1, size_t vl) { + return __riscv_vclmul_vv_u64m2_m(vm, vs2, vs1, vl); +} + +vuint64m2_t test_vclmul_vx_u64m2_m(vbool32_t vm, vuint64m2_t vs2, uint64_t rs1, + size_t vl) { + return __riscv_vclmul_vx_u64m2_m(vm, vs2, rs1, vl); +} + +vuint64m4_t test_vclmul_vv_u64m4_m(vbool16_t vm, vuint64m4_t vs2, + vuint64m4_t vs1, size_t vl) { + return __riscv_vclmul_vv_u64m4_m(vm, vs2, vs1, vl); +} + +vuint64m4_t test_vclmul_vx_u64m4_m(vbool16_t vm, vuint64m4_t vs2, uint64_t rs1, + size_t vl) { + return __riscv_vclmul_vx_u64m4_m(vm, vs2, rs1, vl); +} + +vuint64m8_t test_vclmul_vv_u64m8_m(vbool8_t vm, vuint64m8_t vs2, + vuint64m8_t vs1, size_t vl) { + return __riscv_vclmul_vv_u64m8_m(vm, vs2, vs1, vl); +} + +vuint64m8_t test_vclmul_vx_u64m8_m(vbool8_t vm, vuint64m8_t vs2, uint64_t rs1, + size_t vl) { + return __riscv_vclmul_vx_u64m8_m(vm, vs2, rs1, vl); +} diff --git a/auto-generated/vector-crypto/llvm-api-tests/vclmulh.c b/auto-generated/vector-crypto/llvm-api-tests/vclmulh.c new file mode 100644 index 000000000..94fbc51e7 --- /dev/null +++ b/auto-generated/vector-crypto/llvm-api-tests/vclmulh.c @@ -0,0 +1,85 @@ +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zvl512b \ +// RUN: -target-feature +zvbb \ +// RUN: -target-feature +zvbc \ +// RUN: -target-feature +zvkg \ +// RUN: -target-feature +zvkned \ +// RUN: -target-feature +zvknhb \ +// RUN: -target-feature +zvksed \ +// RUN: -target-feature +zvksh -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include + +vuint64m1_t test_vclmulh_vv_u64m1(vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vclmulh_vv_u64m1(vs2, vs1, vl); +} + +vuint64m1_t test_vclmulh_vx_u64m1(vuint64m1_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vclmulh_vx_u64m1(vs2, rs1, vl); +} + +vuint64m2_t test_vclmulh_vv_u64m2(vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { + return __riscv_vclmulh_vv_u64m2(vs2, vs1, vl); +} + +vuint64m2_t test_vclmulh_vx_u64m2(vuint64m2_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vclmulh_vx_u64m2(vs2, rs1, vl); +} + +vuint64m4_t test_vclmulh_vv_u64m4(vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { + return __riscv_vclmulh_vv_u64m4(vs2, vs1, vl); +} + +vuint64m4_t test_vclmulh_vx_u64m4(vuint64m4_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vclmulh_vx_u64m4(vs2, rs1, vl); +} + +vuint64m8_t test_vclmulh_vv_u64m8(vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { + return __riscv_vclmulh_vv_u64m8(vs2, vs1, vl); +} + +vuint64m8_t test_vclmulh_vx_u64m8(vuint64m8_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vclmulh_vx_u64m8(vs2, rs1, vl); +} + +vuint64m1_t test_vclmulh_vv_u64m1_m(vbool64_t vm, vuint64m1_t vs2, + vuint64m1_t vs1, size_t vl) { + return __riscv_vclmulh_vv_u64m1_m(vm, vs2, vs1, vl); +} + +vuint64m1_t test_vclmulh_vx_u64m1_m(vbool64_t vm, vuint64m1_t vs2, uint64_t rs1, + size_t vl) { + return __riscv_vclmulh_vx_u64m1_m(vm, vs2, rs1, vl); +} + +vuint64m2_t test_vclmulh_vv_u64m2_m(vbool32_t vm, vuint64m2_t vs2, + vuint64m2_t vs1, size_t vl) { + return __riscv_vclmulh_vv_u64m2_m(vm, vs2, vs1, vl); +} + +vuint64m2_t test_vclmulh_vx_u64m2_m(vbool32_t vm, vuint64m2_t vs2, uint64_t rs1, + size_t vl) { + return __riscv_vclmulh_vx_u64m2_m(vm, vs2, rs1, vl); +} + +vuint64m4_t 
test_vclmulh_vv_u64m4_m(vbool16_t vm, vuint64m4_t vs2, + vuint64m4_t vs1, size_t vl) { + return __riscv_vclmulh_vv_u64m4_m(vm, vs2, vs1, vl); +} + +vuint64m4_t test_vclmulh_vx_u64m4_m(vbool16_t vm, vuint64m4_t vs2, uint64_t rs1, + size_t vl) { + return __riscv_vclmulh_vx_u64m4_m(vm, vs2, rs1, vl); +} + +vuint64m8_t test_vclmulh_vv_u64m8_m(vbool8_t vm, vuint64m8_t vs2, + vuint64m8_t vs1, size_t vl) { + return __riscv_vclmulh_vv_u64m8_m(vm, vs2, vs1, vl); +} + +vuint64m8_t test_vclmulh_vx_u64m8_m(vbool8_t vm, vuint64m8_t vs2, uint64_t rs1, + size_t vl) { + return __riscv_vclmulh_vx_u64m8_m(vm, vs2, rs1, vl); +} diff --git a/auto-generated/vector-crypto/llvm-api-tests/vclz.c b/auto-generated/vector-crypto/llvm-api-tests/vclz.c new file mode 100644 index 000000000..6320cf1a7 --- /dev/null +++ b/auto-generated/vector-crypto/llvm-api-tests/vclz.c @@ -0,0 +1,189 @@ +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zvl512b \ +// RUN: -target-feature +zvbb \ +// RUN: -target-feature +zvbc \ +// RUN: -target-feature +zvkg \ +// RUN: -target-feature +zvkned \ +// RUN: -target-feature +zvknhb \ +// RUN: -target-feature +zvksed \ +// RUN: -target-feature +zvksh -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include + +vuint8mf8_t test_vclz_v_u8mf8(vuint8mf8_t vs2, size_t vl) { + return __riscv_vclz_v_u8mf8(vs2, vl); +} + +vuint8mf4_t test_vclz_v_u8mf4(vuint8mf4_t vs2, size_t vl) { + return __riscv_vclz_v_u8mf4(vs2, vl); +} + +vuint8mf2_t test_vclz_v_u8mf2(vuint8mf2_t vs2, size_t vl) { + return __riscv_vclz_v_u8mf2(vs2, vl); +} + +vuint8m1_t test_vclz_v_u8m1(vuint8m1_t vs2, size_t vl) { + return __riscv_vclz_v_u8m1(vs2, vl); +} + +vuint8m2_t test_vclz_v_u8m2(vuint8m2_t vs2, size_t vl) { + return __riscv_vclz_v_u8m2(vs2, vl); +} + +vuint8m4_t test_vclz_v_u8m4(vuint8m4_t vs2, size_t vl) { + return __riscv_vclz_v_u8m4(vs2, vl); +} + +vuint8m8_t test_vclz_v_u8m8(vuint8m8_t vs2, size_t vl) { + return __riscv_vclz_v_u8m8(vs2, vl); +} + +vuint16mf4_t test_vclz_v_u16mf4(vuint16mf4_t vs2, size_t vl) { + return __riscv_vclz_v_u16mf4(vs2, vl); +} + +vuint16mf2_t test_vclz_v_u16mf2(vuint16mf2_t vs2, size_t vl) { + return __riscv_vclz_v_u16mf2(vs2, vl); +} + +vuint16m1_t test_vclz_v_u16m1(vuint16m1_t vs2, size_t vl) { + return __riscv_vclz_v_u16m1(vs2, vl); +} + +vuint16m2_t test_vclz_v_u16m2(vuint16m2_t vs2, size_t vl) { + return __riscv_vclz_v_u16m2(vs2, vl); +} + +vuint16m4_t test_vclz_v_u16m4(vuint16m4_t vs2, size_t vl) { + return __riscv_vclz_v_u16m4(vs2, vl); +} + +vuint16m8_t test_vclz_v_u16m8(vuint16m8_t vs2, size_t vl) { + return __riscv_vclz_v_u16m8(vs2, vl); +} + +vuint32mf2_t test_vclz_v_u32mf2(vuint32mf2_t vs2, size_t vl) { + return __riscv_vclz_v_u32mf2(vs2, vl); +} + +vuint32m1_t test_vclz_v_u32m1(vuint32m1_t vs2, size_t vl) { + return __riscv_vclz_v_u32m1(vs2, vl); +} + +vuint32m2_t test_vclz_v_u32m2(vuint32m2_t vs2, size_t vl) { + return __riscv_vclz_v_u32m2(vs2, vl); +} + +vuint32m4_t test_vclz_v_u32m4(vuint32m4_t vs2, size_t vl) { + return __riscv_vclz_v_u32m4(vs2, vl); +} + +vuint32m8_t test_vclz_v_u32m8(vuint32m8_t vs2, size_t vl) { + return __riscv_vclz_v_u32m8(vs2, vl); +} + +vuint64m1_t test_vclz_v_u64m1(vuint64m1_t vs2, size_t vl) { + return __riscv_vclz_v_u64m1(vs2, vl); +} + +vuint64m2_t test_vclz_v_u64m2(vuint64m2_t vs2, size_t vl) { + return __riscv_vclz_v_u64m2(vs2, vl); +} + +vuint64m4_t test_vclz_v_u64m4(vuint64m4_t vs2, size_t vl) { + return 
__riscv_vclz_v_u64m4(vs2, vl); +} + +vuint64m8_t test_vclz_v_u64m8(vuint64m8_t vs2, size_t vl) { + return __riscv_vclz_v_u64m8(vs2, vl); +} + +vuint8mf8_t test_vclz_v_u8mf8_m(vbool64_t vm, vuint8mf8_t vs2, size_t vl) { + return __riscv_vclz_v_u8mf8_m(vm, vs2, vl); +} + +vuint8mf4_t test_vclz_v_u8mf4_m(vbool32_t vm, vuint8mf4_t vs2, size_t vl) { + return __riscv_vclz_v_u8mf4_m(vm, vs2, vl); +} + +vuint8mf2_t test_vclz_v_u8mf2_m(vbool16_t vm, vuint8mf2_t vs2, size_t vl) { + return __riscv_vclz_v_u8mf2_m(vm, vs2, vl); +} + +vuint8m1_t test_vclz_v_u8m1_m(vbool8_t vm, vuint8m1_t vs2, size_t vl) { + return __riscv_vclz_v_u8m1_m(vm, vs2, vl); +} + +vuint8m2_t test_vclz_v_u8m2_m(vbool4_t vm, vuint8m2_t vs2, size_t vl) { + return __riscv_vclz_v_u8m2_m(vm, vs2, vl); +} + +vuint8m4_t test_vclz_v_u8m4_m(vbool2_t vm, vuint8m4_t vs2, size_t vl) { + return __riscv_vclz_v_u8m4_m(vm, vs2, vl); +} + +vuint8m8_t test_vclz_v_u8m8_m(vbool1_t vm, vuint8m8_t vs2, size_t vl) { + return __riscv_vclz_v_u8m8_m(vm, vs2, vl); +} + +vuint16mf4_t test_vclz_v_u16mf4_m(vbool64_t vm, vuint16mf4_t vs2, size_t vl) { + return __riscv_vclz_v_u16mf4_m(vm, vs2, vl); +} + +vuint16mf2_t test_vclz_v_u16mf2_m(vbool32_t vm, vuint16mf2_t vs2, size_t vl) { + return __riscv_vclz_v_u16mf2_m(vm, vs2, vl); +} + +vuint16m1_t test_vclz_v_u16m1_m(vbool16_t vm, vuint16m1_t vs2, size_t vl) { + return __riscv_vclz_v_u16m1_m(vm, vs2, vl); +} + +vuint16m2_t test_vclz_v_u16m2_m(vbool8_t vm, vuint16m2_t vs2, size_t vl) { + return __riscv_vclz_v_u16m2_m(vm, vs2, vl); +} + +vuint16m4_t test_vclz_v_u16m4_m(vbool4_t vm, vuint16m4_t vs2, size_t vl) { + return __riscv_vclz_v_u16m4_m(vm, vs2, vl); +} + +vuint16m8_t test_vclz_v_u16m8_m(vbool2_t vm, vuint16m8_t vs2, size_t vl) { + return __riscv_vclz_v_u16m8_m(vm, vs2, vl); +} + +vuint32mf2_t test_vclz_v_u32mf2_m(vbool64_t vm, vuint32mf2_t vs2, size_t vl) { + return __riscv_vclz_v_u32mf2_m(vm, vs2, vl); +} + +vuint32m1_t test_vclz_v_u32m1_m(vbool32_t vm, vuint32m1_t vs2, size_t vl) { + return __riscv_vclz_v_u32m1_m(vm, vs2, vl); +} + +vuint32m2_t test_vclz_v_u32m2_m(vbool16_t vm, vuint32m2_t vs2, size_t vl) { + return __riscv_vclz_v_u32m2_m(vm, vs2, vl); +} + +vuint32m4_t test_vclz_v_u32m4_m(vbool8_t vm, vuint32m4_t vs2, size_t vl) { + return __riscv_vclz_v_u32m4_m(vm, vs2, vl); +} + +vuint32m8_t test_vclz_v_u32m8_m(vbool4_t vm, vuint32m8_t vs2, size_t vl) { + return __riscv_vclz_v_u32m8_m(vm, vs2, vl); +} + +vuint64m1_t test_vclz_v_u64m1_m(vbool64_t vm, vuint64m1_t vs2, size_t vl) { + return __riscv_vclz_v_u64m1_m(vm, vs2, vl); +} + +vuint64m2_t test_vclz_v_u64m2_m(vbool32_t vm, vuint64m2_t vs2, size_t vl) { + return __riscv_vclz_v_u64m2_m(vm, vs2, vl); +} + +vuint64m4_t test_vclz_v_u64m4_m(vbool16_t vm, vuint64m4_t vs2, size_t vl) { + return __riscv_vclz_v_u64m4_m(vm, vs2, vl); +} + +vuint64m8_t test_vclz_v_u64m8_m(vbool8_t vm, vuint64m8_t vs2, size_t vl) { + return __riscv_vclz_v_u64m8_m(vm, vs2, vl); +} diff --git a/auto-generated/vector-crypto/llvm-api-tests/vcpop.c b/auto-generated/vector-crypto/llvm-api-tests/vcpop.c new file mode 100644 index 000000000..1061c2222 --- /dev/null +++ b/auto-generated/vector-crypto/llvm-api-tests/vcpop.c @@ -0,0 +1,182 @@ +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include + +vuint8mf8_t test_vcpop_v_u8mf8(vuint8mf8_t vs2, size_t vl) { + return __riscv_vcpop_v_u8mf8(vs2, vl); +} + +vuint8mf4_t 
test_vcpop_v_u8mf4(vuint8mf4_t vs2, size_t vl) { + return __riscv_vcpop_v_u8mf4(vs2, vl); +} + +vuint8mf2_t test_vcpop_v_u8mf2(vuint8mf2_t vs2, size_t vl) { + return __riscv_vcpop_v_u8mf2(vs2, vl); +} + +vuint8m1_t test_vcpop_v_u8m1(vuint8m1_t vs2, size_t vl) { + return __riscv_vcpop_v_u8m1(vs2, vl); +} + +vuint8m2_t test_vcpop_v_u8m2(vuint8m2_t vs2, size_t vl) { + return __riscv_vcpop_v_u8m2(vs2, vl); +} + +vuint8m4_t test_vcpop_v_u8m4(vuint8m4_t vs2, size_t vl) { + return __riscv_vcpop_v_u8m4(vs2, vl); +} + +vuint8m8_t test_vcpop_v_u8m8(vuint8m8_t vs2, size_t vl) { + return __riscv_vcpop_v_u8m8(vs2, vl); +} + +vuint16mf4_t test_vcpop_v_u16mf4(vuint16mf4_t vs2, size_t vl) { + return __riscv_vcpop_v_u16mf4(vs2, vl); +} + +vuint16mf2_t test_vcpop_v_u16mf2(vuint16mf2_t vs2, size_t vl) { + return __riscv_vcpop_v_u16mf2(vs2, vl); +} + +vuint16m1_t test_vcpop_v_u16m1(vuint16m1_t vs2, size_t vl) { + return __riscv_vcpop_v_u16m1(vs2, vl); +} + +vuint16m2_t test_vcpop_v_u16m2(vuint16m2_t vs2, size_t vl) { + return __riscv_vcpop_v_u16m2(vs2, vl); +} + +vuint16m4_t test_vcpop_v_u16m4(vuint16m4_t vs2, size_t vl) { + return __riscv_vcpop_v_u16m4(vs2, vl); +} + +vuint16m8_t test_vcpop_v_u16m8(vuint16m8_t vs2, size_t vl) { + return __riscv_vcpop_v_u16m8(vs2, vl); +} + +vuint32mf2_t test_vcpop_v_u32mf2(vuint32mf2_t vs2, size_t vl) { + return __riscv_vcpop_v_u32mf2(vs2, vl); +} + +vuint32m1_t test_vcpop_v_u32m1(vuint32m1_t vs2, size_t vl) { + return __riscv_vcpop_v_u32m1(vs2, vl); +} + +vuint32m2_t test_vcpop_v_u32m2(vuint32m2_t vs2, size_t vl) { + return __riscv_vcpop_v_u32m2(vs2, vl); +} + +vuint32m4_t test_vcpop_v_u32m4(vuint32m4_t vs2, size_t vl) { + return __riscv_vcpop_v_u32m4(vs2, vl); +} + +vuint32m8_t test_vcpop_v_u32m8(vuint32m8_t vs2, size_t vl) { + return __riscv_vcpop_v_u32m8(vs2, vl); +} + +vuint64m1_t test_vcpop_v_u64m1(vuint64m1_t vs2, size_t vl) { + return __riscv_vcpop_v_u64m1(vs2, vl); +} + +vuint64m2_t test_vcpop_v_u64m2(vuint64m2_t vs2, size_t vl) { + return __riscv_vcpop_v_u64m2(vs2, vl); +} + +vuint64m4_t test_vcpop_v_u64m4(vuint64m4_t vs2, size_t vl) { + return __riscv_vcpop_v_u64m4(vs2, vl); +} + +vuint64m8_t test_vcpop_v_u64m8(vuint64m8_t vs2, size_t vl) { + return __riscv_vcpop_v_u64m8(vs2, vl); +} + +vuint8mf8_t test_vcpop_v_u8mf8_m(vbool64_t vm, vuint8mf8_t vs2, size_t vl) { + return __riscv_vcpop_v_u8mf8_m(vm, vs2, vl); +} + +vuint8mf4_t test_vcpop_v_u8mf4_m(vbool32_t vm, vuint8mf4_t vs2, size_t vl) { + return __riscv_vcpop_v_u8mf4_m(vm, vs2, vl); +} + +vuint8mf2_t test_vcpop_v_u8mf2_m(vbool16_t vm, vuint8mf2_t vs2, size_t vl) { + return __riscv_vcpop_v_u8mf2_m(vm, vs2, vl); +} + +vuint8m1_t test_vcpop_v_u8m1_m(vbool8_t vm, vuint8m1_t vs2, size_t vl) { + return __riscv_vcpop_v_u8m1_m(vm, vs2, vl); +} + +vuint8m2_t test_vcpop_v_u8m2_m(vbool4_t vm, vuint8m2_t vs2, size_t vl) { + return __riscv_vcpop_v_u8m2_m(vm, vs2, vl); +} + +vuint8m4_t test_vcpop_v_u8m4_m(vbool2_t vm, vuint8m4_t vs2, size_t vl) { + return __riscv_vcpop_v_u8m4_m(vm, vs2, vl); +} + +vuint8m8_t test_vcpop_v_u8m8_m(vbool1_t vm, vuint8m8_t vs2, size_t vl) { + return __riscv_vcpop_v_u8m8_m(vm, vs2, vl); +} + +vuint16mf4_t test_vcpop_v_u16mf4_m(vbool64_t vm, vuint16mf4_t vs2, size_t vl) { + return __riscv_vcpop_v_u16mf4_m(vm, vs2, vl); +} + +vuint16mf2_t test_vcpop_v_u16mf2_m(vbool32_t vm, vuint16mf2_t vs2, size_t vl) { + return __riscv_vcpop_v_u16mf2_m(vm, vs2, vl); +} + +vuint16m1_t test_vcpop_v_u16m1_m(vbool16_t vm, vuint16m1_t vs2, size_t vl) { + return __riscv_vcpop_v_u16m1_m(vm, vs2, vl); +} + +vuint16m2_t 
test_vcpop_v_u16m2_m(vbool8_t vm, vuint16m2_t vs2, size_t vl) { + return __riscv_vcpop_v_u16m2_m(vm, vs2, vl); +} + +vuint16m4_t test_vcpop_v_u16m4_m(vbool4_t vm, vuint16m4_t vs2, size_t vl) { + return __riscv_vcpop_v_u16m4_m(vm, vs2, vl); +} + +vuint16m8_t test_vcpop_v_u16m8_m(vbool2_t vm, vuint16m8_t vs2, size_t vl) { + return __riscv_vcpop_v_u16m8_m(vm, vs2, vl); +} + +vuint32mf2_t test_vcpop_v_u32mf2_m(vbool64_t vm, vuint32mf2_t vs2, size_t vl) { + return __riscv_vcpop_v_u32mf2_m(vm, vs2, vl); +} + +vuint32m1_t test_vcpop_v_u32m1_m(vbool32_t vm, vuint32m1_t vs2, size_t vl) { + return __riscv_vcpop_v_u32m1_m(vm, vs2, vl); +} + +vuint32m2_t test_vcpop_v_u32m2_m(vbool16_t vm, vuint32m2_t vs2, size_t vl) { + return __riscv_vcpop_v_u32m2_m(vm, vs2, vl); +} + +vuint32m4_t test_vcpop_v_u32m4_m(vbool8_t vm, vuint32m4_t vs2, size_t vl) { + return __riscv_vcpop_v_u32m4_m(vm, vs2, vl); +} + +vuint32m8_t test_vcpop_v_u32m8_m(vbool4_t vm, vuint32m8_t vs2, size_t vl) { + return __riscv_vcpop_v_u32m8_m(vm, vs2, vl); +} + +vuint64m1_t test_vcpop_v_u64m1_m(vbool64_t vm, vuint64m1_t vs2, size_t vl) { + return __riscv_vcpop_v_u64m1_m(vm, vs2, vl); +} + +vuint64m2_t test_vcpop_v_u64m2_m(vbool32_t vm, vuint64m2_t vs2, size_t vl) { + return __riscv_vcpop_v_u64m2_m(vm, vs2, vl); +} + +vuint64m4_t test_vcpop_v_u64m4_m(vbool16_t vm, vuint64m4_t vs2, size_t vl) { + return __riscv_vcpop_v_u64m4_m(vm, vs2, vl); +} + +vuint64m8_t test_vcpop_v_u64m8_m(vbool8_t vm, vuint64m8_t vs2, size_t vl) { + return __riscv_vcpop_v_u64m8_m(vm, vs2, vl); +} diff --git a/auto-generated/vector-crypto/llvm-api-tests/vctz.c b/auto-generated/vector-crypto/llvm-api-tests/vctz.c new file mode 100644 index 000000000..926741260 --- /dev/null +++ b/auto-generated/vector-crypto/llvm-api-tests/vctz.c @@ -0,0 +1,189 @@ +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zvl512b \ +// RUN: -target-feature +zvbb \ +// RUN: -target-feature +zvbc \ +// RUN: -target-feature +zvkg \ +// RUN: -target-feature +zvkned \ +// RUN: -target-feature +zvknhb \ +// RUN: -target-feature +zvksed \ +// RUN: -target-feature +zvksh -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include + +vuint8mf8_t test_vctz_v_u8mf8(vuint8mf8_t vs2, size_t vl) { + return __riscv_vctz_v_u8mf8(vs2, vl); +} + +vuint8mf4_t test_vctz_v_u8mf4(vuint8mf4_t vs2, size_t vl) { + return __riscv_vctz_v_u8mf4(vs2, vl); +} + +vuint8mf2_t test_vctz_v_u8mf2(vuint8mf2_t vs2, size_t vl) { + return __riscv_vctz_v_u8mf2(vs2, vl); +} + +vuint8m1_t test_vctz_v_u8m1(vuint8m1_t vs2, size_t vl) { + return __riscv_vctz_v_u8m1(vs2, vl); +} + +vuint8m2_t test_vctz_v_u8m2(vuint8m2_t vs2, size_t vl) { + return __riscv_vctz_v_u8m2(vs2, vl); +} + +vuint8m4_t test_vctz_v_u8m4(vuint8m4_t vs2, size_t vl) { + return __riscv_vctz_v_u8m4(vs2, vl); +} + +vuint8m8_t test_vctz_v_u8m8(vuint8m8_t vs2, size_t vl) { + return __riscv_vctz_v_u8m8(vs2, vl); +} + +vuint16mf4_t test_vctz_v_u16mf4(vuint16mf4_t vs2, size_t vl) { + return __riscv_vctz_v_u16mf4(vs2, vl); +} + +vuint16mf2_t test_vctz_v_u16mf2(vuint16mf2_t vs2, size_t vl) { + return __riscv_vctz_v_u16mf2(vs2, vl); +} + +vuint16m1_t test_vctz_v_u16m1(vuint16m1_t vs2, size_t vl) { + return __riscv_vctz_v_u16m1(vs2, vl); +} + +vuint16m2_t test_vctz_v_u16m2(vuint16m2_t vs2, size_t vl) { + return __riscv_vctz_v_u16m2(vs2, vl); +} + +vuint16m4_t test_vctz_v_u16m4(vuint16m4_t vs2, size_t vl) { + return __riscv_vctz_v_u16m4(vs2, vl); 
+} + +vuint16m8_t test_vctz_v_u16m8(vuint16m8_t vs2, size_t vl) { + return __riscv_vctz_v_u16m8(vs2, vl); +} + +vuint32mf2_t test_vctz_v_u32mf2(vuint32mf2_t vs2, size_t vl) { + return __riscv_vctz_v_u32mf2(vs2, vl); +} + +vuint32m1_t test_vctz_v_u32m1(vuint32m1_t vs2, size_t vl) { + return __riscv_vctz_v_u32m1(vs2, vl); +} + +vuint32m2_t test_vctz_v_u32m2(vuint32m2_t vs2, size_t vl) { + return __riscv_vctz_v_u32m2(vs2, vl); +} + +vuint32m4_t test_vctz_v_u32m4(vuint32m4_t vs2, size_t vl) { + return __riscv_vctz_v_u32m4(vs2, vl); +} + +vuint32m8_t test_vctz_v_u32m8(vuint32m8_t vs2, size_t vl) { + return __riscv_vctz_v_u32m8(vs2, vl); +} + +vuint64m1_t test_vctz_v_u64m1(vuint64m1_t vs2, size_t vl) { + return __riscv_vctz_v_u64m1(vs2, vl); +} + +vuint64m2_t test_vctz_v_u64m2(vuint64m2_t vs2, size_t vl) { + return __riscv_vctz_v_u64m2(vs2, vl); +} + +vuint64m4_t test_vctz_v_u64m4(vuint64m4_t vs2, size_t vl) { + return __riscv_vctz_v_u64m4(vs2, vl); +} + +vuint64m8_t test_vctz_v_u64m8(vuint64m8_t vs2, size_t vl) { + return __riscv_vctz_v_u64m8(vs2, vl); +} + +vuint8mf8_t test_vctz_v_u8mf8_m(vbool64_t vm, vuint8mf8_t vs2, size_t vl) { + return __riscv_vctz_v_u8mf8_m(vm, vs2, vl); +} + +vuint8mf4_t test_vctz_v_u8mf4_m(vbool32_t vm, vuint8mf4_t vs2, size_t vl) { + return __riscv_vctz_v_u8mf4_m(vm, vs2, vl); +} + +vuint8mf2_t test_vctz_v_u8mf2_m(vbool16_t vm, vuint8mf2_t vs2, size_t vl) { + return __riscv_vctz_v_u8mf2_m(vm, vs2, vl); +} + +vuint8m1_t test_vctz_v_u8m1_m(vbool8_t vm, vuint8m1_t vs2, size_t vl) { + return __riscv_vctz_v_u8m1_m(vm, vs2, vl); +} + +vuint8m2_t test_vctz_v_u8m2_m(vbool4_t vm, vuint8m2_t vs2, size_t vl) { + return __riscv_vctz_v_u8m2_m(vm, vs2, vl); +} + +vuint8m4_t test_vctz_v_u8m4_m(vbool2_t vm, vuint8m4_t vs2, size_t vl) { + return __riscv_vctz_v_u8m4_m(vm, vs2, vl); +} + +vuint8m8_t test_vctz_v_u8m8_m(vbool1_t vm, vuint8m8_t vs2, size_t vl) { + return __riscv_vctz_v_u8m8_m(vm, vs2, vl); +} + +vuint16mf4_t test_vctz_v_u16mf4_m(vbool64_t vm, vuint16mf4_t vs2, size_t vl) { + return __riscv_vctz_v_u16mf4_m(vm, vs2, vl); +} + +vuint16mf2_t test_vctz_v_u16mf2_m(vbool32_t vm, vuint16mf2_t vs2, size_t vl) { + return __riscv_vctz_v_u16mf2_m(vm, vs2, vl); +} + +vuint16m1_t test_vctz_v_u16m1_m(vbool16_t vm, vuint16m1_t vs2, size_t vl) { + return __riscv_vctz_v_u16m1_m(vm, vs2, vl); +} + +vuint16m2_t test_vctz_v_u16m2_m(vbool8_t vm, vuint16m2_t vs2, size_t vl) { + return __riscv_vctz_v_u16m2_m(vm, vs2, vl); +} + +vuint16m4_t test_vctz_v_u16m4_m(vbool4_t vm, vuint16m4_t vs2, size_t vl) { + return __riscv_vctz_v_u16m4_m(vm, vs2, vl); +} + +vuint16m8_t test_vctz_v_u16m8_m(vbool2_t vm, vuint16m8_t vs2, size_t vl) { + return __riscv_vctz_v_u16m8_m(vm, vs2, vl); +} + +vuint32mf2_t test_vctz_v_u32mf2_m(vbool64_t vm, vuint32mf2_t vs2, size_t vl) { + return __riscv_vctz_v_u32mf2_m(vm, vs2, vl); +} + +vuint32m1_t test_vctz_v_u32m1_m(vbool32_t vm, vuint32m1_t vs2, size_t vl) { + return __riscv_vctz_v_u32m1_m(vm, vs2, vl); +} + +vuint32m2_t test_vctz_v_u32m2_m(vbool16_t vm, vuint32m2_t vs2, size_t vl) { + return __riscv_vctz_v_u32m2_m(vm, vs2, vl); +} + +vuint32m4_t test_vctz_v_u32m4_m(vbool8_t vm, vuint32m4_t vs2, size_t vl) { + return __riscv_vctz_v_u32m4_m(vm, vs2, vl); +} + +vuint32m8_t test_vctz_v_u32m8_m(vbool4_t vm, vuint32m8_t vs2, size_t vl) { + return __riscv_vctz_v_u32m8_m(vm, vs2, vl); +} + +vuint64m1_t test_vctz_v_u64m1_m(vbool64_t vm, vuint64m1_t vs2, size_t vl) { + return __riscv_vctz_v_u64m1_m(vm, vs2, vl); +} + +vuint64m2_t test_vctz_v_u64m2_m(vbool32_t vm, vuint64m2_t vs2, 
size_t vl) { + return __riscv_vctz_v_u64m2_m(vm, vs2, vl); +} + +vuint64m4_t test_vctz_v_u64m4_m(vbool16_t vm, vuint64m4_t vs2, size_t vl) { + return __riscv_vctz_v_u64m4_m(vm, vs2, vl); +} + +vuint64m8_t test_vctz_v_u64m8_m(vbool8_t vm, vuint64m8_t vs2, size_t vl) { + return __riscv_vctz_v_u64m8_m(vm, vs2, vl); +} diff --git a/auto-generated/vector-crypto/llvm-api-tests/vghsh.c b/auto-generated/vector-crypto/llvm-api-tests/vghsh.c new file mode 100644 index 000000000..6b2db98f6 --- /dev/null +++ b/auto-generated/vector-crypto/llvm-api-tests/vghsh.c @@ -0,0 +1,38 @@ +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zvl512b \ +// RUN: -target-feature +zvbb \ +// RUN: -target-feature +zvbc \ +// RUN: -target-feature +zvkg \ +// RUN: -target-feature +zvkned \ +// RUN: -target-feature +zvknhb \ +// RUN: -target-feature +zvksed \ +// RUN: -target-feature +zvksh -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include + +vuint32mf2_t test_vghsh_vv_u32mf2(vuint32mf2_t vd, vuint32mf2_t vs2, + vuint32mf2_t vs1, size_t vl) { + return __riscv_vghsh_vv_u32mf2(vd, vs2, vs1, vl); +} + +vuint32m1_t test_vghsh_vv_u32m1(vuint32m1_t vd, vuint32m1_t vs2, + vuint32m1_t vs1, size_t vl) { + return __riscv_vghsh_vv_u32m1(vd, vs2, vs1, vl); +} + +vuint32m2_t test_vghsh_vv_u32m2(vuint32m2_t vd, vuint32m2_t vs2, + vuint32m2_t vs1, size_t vl) { + return __riscv_vghsh_vv_u32m2(vd, vs2, vs1, vl); +} + +vuint32m4_t test_vghsh_vv_u32m4(vuint32m4_t vd, vuint32m4_t vs2, + vuint32m4_t vs1, size_t vl) { + return __riscv_vghsh_vv_u32m4(vd, vs2, vs1, vl); +} + +vuint32m8_t test_vghsh_vv_u32m8(vuint32m8_t vd, vuint32m8_t vs2, + vuint32m8_t vs1, size_t vl) { + return __riscv_vghsh_vv_u32m8(vd, vs2, vs1, vl); +} diff --git a/auto-generated/vector-crypto/llvm-api-tests/vgmul.c b/auto-generated/vector-crypto/llvm-api-tests/vgmul.c new file mode 100644 index 000000000..1abf16248 --- /dev/null +++ b/auto-generated/vector-crypto/llvm-api-tests/vgmul.c @@ -0,0 +1,34 @@ +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zvl512b \ +// RUN: -target-feature +zvbb \ +// RUN: -target-feature +zvbc \ +// RUN: -target-feature +zvkg \ +// RUN: -target-feature +zvkned \ +// RUN: -target-feature +zvknhb \ +// RUN: -target-feature +zvksed \ +// RUN: -target-feature +zvksh -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include + +vuint32mf2_t test_vgmul_vv_u32mf2(vuint32mf2_t vd, vuint32mf2_t vs2, + size_t vl) { + return __riscv_vgmul_vv_u32mf2(vd, vs2, vl); +} + +vuint32m1_t test_vgmul_vv_u32m1(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vgmul_vv_u32m1(vd, vs2, vl); +} + +vuint32m2_t test_vgmul_vv_u32m2(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vgmul_vv_u32m2(vd, vs2, vl); +} + +vuint32m4_t test_vgmul_vv_u32m4(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vgmul_vv_u32m4(vd, vs2, vl); +} + +vuint32m8_t test_vgmul_vv_u32m8(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { + return __riscv_vgmul_vv_u32m8(vd, vs2, vl); +} diff --git a/auto-generated/vector-crypto/llvm-api-tests/vrev8.c b/auto-generated/vector-crypto/llvm-api-tests/vrev8.c new file mode 100644 index 000000000..717dfd27d --- /dev/null +++ b/auto-generated/vector-crypto/llvm-api-tests/vrev8.c @@ -0,0 +1,189 @@ +// REQUIRES: riscv-registered-target +// 
RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zvl512b \ +// RUN: -target-feature +zvbb \ +// RUN: -target-feature +zvbc \ +// RUN: -target-feature +zvkg \ +// RUN: -target-feature +zvkned \ +// RUN: -target-feature +zvknhb \ +// RUN: -target-feature +zvksed \ +// RUN: -target-feature +zvksh -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include + +vuint8mf8_t test_vrev8_v_u8mf8(vuint8mf8_t vs2, size_t vl) { + return __riscv_vrev8_v_u8mf8(vs2, vl); +} + +vuint8mf4_t test_vrev8_v_u8mf4(vuint8mf4_t vs2, size_t vl) { + return __riscv_vrev8_v_u8mf4(vs2, vl); +} + +vuint8mf2_t test_vrev8_v_u8mf2(vuint8mf2_t vs2, size_t vl) { + return __riscv_vrev8_v_u8mf2(vs2, vl); +} + +vuint8m1_t test_vrev8_v_u8m1(vuint8m1_t vs2, size_t vl) { + return __riscv_vrev8_v_u8m1(vs2, vl); +} + +vuint8m2_t test_vrev8_v_u8m2(vuint8m2_t vs2, size_t vl) { + return __riscv_vrev8_v_u8m2(vs2, vl); +} + +vuint8m4_t test_vrev8_v_u8m4(vuint8m4_t vs2, size_t vl) { + return __riscv_vrev8_v_u8m4(vs2, vl); +} + +vuint8m8_t test_vrev8_v_u8m8(vuint8m8_t vs2, size_t vl) { + return __riscv_vrev8_v_u8m8(vs2, vl); +} + +vuint16mf4_t test_vrev8_v_u16mf4(vuint16mf4_t vs2, size_t vl) { + return __riscv_vrev8_v_u16mf4(vs2, vl); +} + +vuint16mf2_t test_vrev8_v_u16mf2(vuint16mf2_t vs2, size_t vl) { + return __riscv_vrev8_v_u16mf2(vs2, vl); +} + +vuint16m1_t test_vrev8_v_u16m1(vuint16m1_t vs2, size_t vl) { + return __riscv_vrev8_v_u16m1(vs2, vl); +} + +vuint16m2_t test_vrev8_v_u16m2(vuint16m2_t vs2, size_t vl) { + return __riscv_vrev8_v_u16m2(vs2, vl); +} + +vuint16m4_t test_vrev8_v_u16m4(vuint16m4_t vs2, size_t vl) { + return __riscv_vrev8_v_u16m4(vs2, vl); +} + +vuint16m8_t test_vrev8_v_u16m8(vuint16m8_t vs2, size_t vl) { + return __riscv_vrev8_v_u16m8(vs2, vl); +} + +vuint32mf2_t test_vrev8_v_u32mf2(vuint32mf2_t vs2, size_t vl) { + return __riscv_vrev8_v_u32mf2(vs2, vl); +} + +vuint32m1_t test_vrev8_v_u32m1(vuint32m1_t vs2, size_t vl) { + return __riscv_vrev8_v_u32m1(vs2, vl); +} + +vuint32m2_t test_vrev8_v_u32m2(vuint32m2_t vs2, size_t vl) { + return __riscv_vrev8_v_u32m2(vs2, vl); +} + +vuint32m4_t test_vrev8_v_u32m4(vuint32m4_t vs2, size_t vl) { + return __riscv_vrev8_v_u32m4(vs2, vl); +} + +vuint32m8_t test_vrev8_v_u32m8(vuint32m8_t vs2, size_t vl) { + return __riscv_vrev8_v_u32m8(vs2, vl); +} + +vuint64m1_t test_vrev8_v_u64m1(vuint64m1_t vs2, size_t vl) { + return __riscv_vrev8_v_u64m1(vs2, vl); +} + +vuint64m2_t test_vrev8_v_u64m2(vuint64m2_t vs2, size_t vl) { + return __riscv_vrev8_v_u64m2(vs2, vl); +} + +vuint64m4_t test_vrev8_v_u64m4(vuint64m4_t vs2, size_t vl) { + return __riscv_vrev8_v_u64m4(vs2, vl); +} + +vuint64m8_t test_vrev8_v_u64m8(vuint64m8_t vs2, size_t vl) { + return __riscv_vrev8_v_u64m8(vs2, vl); +} + +vuint8mf8_t test_vrev8_v_u8mf8_m(vbool64_t vm, vuint8mf8_t vs2, size_t vl) { + return __riscv_vrev8_v_u8mf8_m(vm, vs2, vl); +} + +vuint8mf4_t test_vrev8_v_u8mf4_m(vbool32_t vm, vuint8mf4_t vs2, size_t vl) { + return __riscv_vrev8_v_u8mf4_m(vm, vs2, vl); +} + +vuint8mf2_t test_vrev8_v_u8mf2_m(vbool16_t vm, vuint8mf2_t vs2, size_t vl) { + return __riscv_vrev8_v_u8mf2_m(vm, vs2, vl); +} + +vuint8m1_t test_vrev8_v_u8m1_m(vbool8_t vm, vuint8m1_t vs2, size_t vl) { + return __riscv_vrev8_v_u8m1_m(vm, vs2, vl); +} + +vuint8m2_t test_vrev8_v_u8m2_m(vbool4_t vm, vuint8m2_t vs2, size_t vl) { + return __riscv_vrev8_v_u8m2_m(vm, vs2, vl); +} + +vuint8m4_t test_vrev8_v_u8m4_m(vbool2_t vm, vuint8m4_t vs2, size_t vl) { + return 
__riscv_vrev8_v_u8m4_m(vm, vs2, vl); +} + +vuint8m8_t test_vrev8_v_u8m8_m(vbool1_t vm, vuint8m8_t vs2, size_t vl) { + return __riscv_vrev8_v_u8m8_m(vm, vs2, vl); +} + +vuint16mf4_t test_vrev8_v_u16mf4_m(vbool64_t vm, vuint16mf4_t vs2, size_t vl) { + return __riscv_vrev8_v_u16mf4_m(vm, vs2, vl); +} + +vuint16mf2_t test_vrev8_v_u16mf2_m(vbool32_t vm, vuint16mf2_t vs2, size_t vl) { + return __riscv_vrev8_v_u16mf2_m(vm, vs2, vl); +} + +vuint16m1_t test_vrev8_v_u16m1_m(vbool16_t vm, vuint16m1_t vs2, size_t vl) { + return __riscv_vrev8_v_u16m1_m(vm, vs2, vl); +} + +vuint16m2_t test_vrev8_v_u16m2_m(vbool8_t vm, vuint16m2_t vs2, size_t vl) { + return __riscv_vrev8_v_u16m2_m(vm, vs2, vl); +} + +vuint16m4_t test_vrev8_v_u16m4_m(vbool4_t vm, vuint16m4_t vs2, size_t vl) { + return __riscv_vrev8_v_u16m4_m(vm, vs2, vl); +} + +vuint16m8_t test_vrev8_v_u16m8_m(vbool2_t vm, vuint16m8_t vs2, size_t vl) { + return __riscv_vrev8_v_u16m8_m(vm, vs2, vl); +} + +vuint32mf2_t test_vrev8_v_u32mf2_m(vbool64_t vm, vuint32mf2_t vs2, size_t vl) { + return __riscv_vrev8_v_u32mf2_m(vm, vs2, vl); +} + +vuint32m1_t test_vrev8_v_u32m1_m(vbool32_t vm, vuint32m1_t vs2, size_t vl) { + return __riscv_vrev8_v_u32m1_m(vm, vs2, vl); +} + +vuint32m2_t test_vrev8_v_u32m2_m(vbool16_t vm, vuint32m2_t vs2, size_t vl) { + return __riscv_vrev8_v_u32m2_m(vm, vs2, vl); +} + +vuint32m4_t test_vrev8_v_u32m4_m(vbool8_t vm, vuint32m4_t vs2, size_t vl) { + return __riscv_vrev8_v_u32m4_m(vm, vs2, vl); +} + +vuint32m8_t test_vrev8_v_u32m8_m(vbool4_t vm, vuint32m8_t vs2, size_t vl) { + return __riscv_vrev8_v_u32m8_m(vm, vs2, vl); +} + +vuint64m1_t test_vrev8_v_u64m1_m(vbool64_t vm, vuint64m1_t vs2, size_t vl) { + return __riscv_vrev8_v_u64m1_m(vm, vs2, vl); +} + +vuint64m2_t test_vrev8_v_u64m2_m(vbool32_t vm, vuint64m2_t vs2, size_t vl) { + return __riscv_vrev8_v_u64m2_m(vm, vs2, vl); +} + +vuint64m4_t test_vrev8_v_u64m4_m(vbool16_t vm, vuint64m4_t vs2, size_t vl) { + return __riscv_vrev8_v_u64m4_m(vm, vs2, vl); +} + +vuint64m8_t test_vrev8_v_u64m8_m(vbool8_t vm, vuint64m8_t vs2, size_t vl) { + return __riscv_vrev8_v_u64m8_m(vm, vs2, vl); +} diff --git a/auto-generated/vector-crypto/llvm-api-tests/vrol.c b/auto-generated/vector-crypto/llvm-api-tests/vrol.c new file mode 100644 index 000000000..1bddb1516 --- /dev/null +++ b/auto-generated/vector-crypto/llvm-api-tests/vrol.c @@ -0,0 +1,412 @@ +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zvl512b \ +// RUN: -target-feature +zvbb \ +// RUN: -target-feature +zvbc \ +// RUN: -target-feature +zvkg \ +// RUN: -target-feature +zvkned \ +// RUN: -target-feature +zvknhb \ +// RUN: -target-feature +zvksed \ +// RUN: -target-feature +zvksh -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include + +vuint8mf8_t test_vrol_vv_u8mf8(vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { + return __riscv_vrol_vv_u8mf8(vs2, vs1, vl); +} + +vuint8mf8_t test_vrol_vx_u8mf8(vuint8mf8_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u8mf8(vs2, rs1, vl); +} + +vuint8mf4_t test_vrol_vv_u8mf4(vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { + return __riscv_vrol_vv_u8mf4(vs2, vs1, vl); +} + +vuint8mf4_t test_vrol_vx_u8mf4(vuint8mf4_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u8mf4(vs2, rs1, vl); +} + +vuint8mf2_t test_vrol_vv_u8mf2(vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { + return __riscv_vrol_vv_u8mf2(vs2, vs1, vl); +} + +vuint8mf2_t 
test_vrol_vx_u8mf2(vuint8mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u8mf2(vs2, rs1, vl); +} + +vuint8m1_t test_vrol_vv_u8m1(vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vrol_vv_u8m1(vs2, vs1, vl); +} + +vuint8m1_t test_vrol_vx_u8m1(vuint8m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u8m1(vs2, rs1, vl); +} + +vuint8m2_t test_vrol_vv_u8m2(vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) { + return __riscv_vrol_vv_u8m2(vs2, vs1, vl); +} + +vuint8m2_t test_vrol_vx_u8m2(vuint8m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u8m2(vs2, rs1, vl); +} + +vuint8m4_t test_vrol_vv_u8m4(vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) { + return __riscv_vrol_vv_u8m4(vs2, vs1, vl); +} + +vuint8m4_t test_vrol_vx_u8m4(vuint8m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u8m4(vs2, rs1, vl); +} + +vuint8m8_t test_vrol_vv_u8m8(vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) { + return __riscv_vrol_vv_u8m8(vs2, vs1, vl); +} + +vuint8m8_t test_vrol_vx_u8m8(vuint8m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u8m8(vs2, rs1, vl); +} + +vuint16mf4_t test_vrol_vv_u16mf4(vuint16mf4_t vs2, vuint16mf4_t vs1, + size_t vl) { + return __riscv_vrol_vv_u16mf4(vs2, vs1, vl); +} + +vuint16mf4_t test_vrol_vx_u16mf4(vuint16mf4_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u16mf4(vs2, rs1, vl); +} + +vuint16mf2_t test_vrol_vv_u16mf2(vuint16mf2_t vs2, vuint16mf2_t vs1, + size_t vl) { + return __riscv_vrol_vv_u16mf2(vs2, vs1, vl); +} + +vuint16mf2_t test_vrol_vx_u16mf2(vuint16mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u16mf2(vs2, rs1, vl); +} + +vuint16m1_t test_vrol_vv_u16m1(vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vrol_vv_u16m1(vs2, vs1, vl); +} + +vuint16m1_t test_vrol_vx_u16m1(vuint16m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u16m1(vs2, rs1, vl); +} + +vuint16m2_t test_vrol_vv_u16m2(vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { + return __riscv_vrol_vv_u16m2(vs2, vs1, vl); +} + +vuint16m2_t test_vrol_vx_u16m2(vuint16m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u16m2(vs2, rs1, vl); +} + +vuint16m4_t test_vrol_vv_u16m4(vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { + return __riscv_vrol_vv_u16m4(vs2, vs1, vl); +} + +vuint16m4_t test_vrol_vx_u16m4(vuint16m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u16m4(vs2, rs1, vl); +} + +vuint16m8_t test_vrol_vv_u16m8(vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) { + return __riscv_vrol_vv_u16m8(vs2, vs1, vl); +} + +vuint16m8_t test_vrol_vx_u16m8(vuint16m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u16m8(vs2, rs1, vl); +} + +vuint32mf2_t test_vrol_vv_u32mf2(vuint32mf2_t vs2, vuint32mf2_t vs1, + size_t vl) { + return __riscv_vrol_vv_u32mf2(vs2, vs1, vl); +} + +vuint32mf2_t test_vrol_vx_u32mf2(vuint32mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u32mf2(vs2, rs1, vl); +} + +vuint32m1_t test_vrol_vv_u32m1(vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vrol_vv_u32m1(vs2, vs1, vl); +} + +vuint32m1_t test_vrol_vx_u32m1(vuint32m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u32m1(vs2, rs1, vl); +} + +vuint32m2_t test_vrol_vv_u32m2(vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { + return __riscv_vrol_vv_u32m2(vs2, vs1, vl); +} + +vuint32m2_t test_vrol_vx_u32m2(vuint32m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u32m2(vs2, rs1, vl); +} + +vuint32m4_t test_vrol_vv_u32m4(vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { + return __riscv_vrol_vv_u32m4(vs2, vs1, vl); +} + 
+vuint32m4_t test_vrol_vx_u32m4(vuint32m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u32m4(vs2, rs1, vl); +} + +vuint32m8_t test_vrol_vv_u32m8(vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) { + return __riscv_vrol_vv_u32m8(vs2, vs1, vl); +} + +vuint32m8_t test_vrol_vx_u32m8(vuint32m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u32m8(vs2, rs1, vl); +} + +vuint64m1_t test_vrol_vv_u64m1(vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vrol_vv_u64m1(vs2, vs1, vl); +} + +vuint64m1_t test_vrol_vx_u64m1(vuint64m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u64m1(vs2, rs1, vl); +} + +vuint64m2_t test_vrol_vv_u64m2(vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { + return __riscv_vrol_vv_u64m2(vs2, vs1, vl); +} + +vuint64m2_t test_vrol_vx_u64m2(vuint64m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u64m2(vs2, rs1, vl); +} + +vuint64m4_t test_vrol_vv_u64m4(vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { + return __riscv_vrol_vv_u64m4(vs2, vs1, vl); +} + +vuint64m4_t test_vrol_vx_u64m4(vuint64m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u64m4(vs2, rs1, vl); +} + +vuint64m8_t test_vrol_vv_u64m8(vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { + return __riscv_vrol_vv_u64m8(vs2, vs1, vl); +} + +vuint64m8_t test_vrol_vx_u64m8(vuint64m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u64m8(vs2, rs1, vl); +} + +vuint8mf8_t test_vrol_vv_u8mf8_m(vbool64_t vm, vuint8mf8_t vs2, vuint8mf8_t vs1, + size_t vl) { + return __riscv_vrol_vv_u8mf8_m(vm, vs2, vs1, vl); +} + +vuint8mf8_t test_vrol_vx_u8mf8_m(vbool64_t vm, vuint8mf8_t vs2, size_t rs1, + size_t vl) { + return __riscv_vrol_vx_u8mf8_m(vm, vs2, rs1, vl); +} + +vuint8mf4_t test_vrol_vv_u8mf4_m(vbool32_t vm, vuint8mf4_t vs2, vuint8mf4_t vs1, + size_t vl) { + return __riscv_vrol_vv_u8mf4_m(vm, vs2, vs1, vl); +} + +vuint8mf4_t test_vrol_vx_u8mf4_m(vbool32_t vm, vuint8mf4_t vs2, size_t rs1, + size_t vl) { + return __riscv_vrol_vx_u8mf4_m(vm, vs2, rs1, vl); +} + +vuint8mf2_t test_vrol_vv_u8mf2_m(vbool16_t vm, vuint8mf2_t vs2, vuint8mf2_t vs1, + size_t vl) { + return __riscv_vrol_vv_u8mf2_m(vm, vs2, vs1, vl); +} + +vuint8mf2_t test_vrol_vx_u8mf2_m(vbool16_t vm, vuint8mf2_t vs2, size_t rs1, + size_t vl) { + return __riscv_vrol_vx_u8mf2_m(vm, vs2, rs1, vl); +} + +vuint8m1_t test_vrol_vv_u8m1_m(vbool8_t vm, vuint8m1_t vs2, vuint8m1_t vs1, + size_t vl) { + return __riscv_vrol_vv_u8m1_m(vm, vs2, vs1, vl); +} + +vuint8m1_t test_vrol_vx_u8m1_m(vbool8_t vm, vuint8m1_t vs2, size_t rs1, + size_t vl) { + return __riscv_vrol_vx_u8m1_m(vm, vs2, rs1, vl); +} + +vuint8m2_t test_vrol_vv_u8m2_m(vbool4_t vm, vuint8m2_t vs2, vuint8m2_t vs1, + size_t vl) { + return __riscv_vrol_vv_u8m2_m(vm, vs2, vs1, vl); +} + +vuint8m2_t test_vrol_vx_u8m2_m(vbool4_t vm, vuint8m2_t vs2, size_t rs1, + size_t vl) { + return __riscv_vrol_vx_u8m2_m(vm, vs2, rs1, vl); +} + +vuint8m4_t test_vrol_vv_u8m4_m(vbool2_t vm, vuint8m4_t vs2, vuint8m4_t vs1, + size_t vl) { + return __riscv_vrol_vv_u8m4_m(vm, vs2, vs1, vl); +} + +vuint8m4_t test_vrol_vx_u8m4_m(vbool2_t vm, vuint8m4_t vs2, size_t rs1, + size_t vl) { + return __riscv_vrol_vx_u8m4_m(vm, vs2, rs1, vl); +} + +vuint8m8_t test_vrol_vv_u8m8_m(vbool1_t vm, vuint8m8_t vs2, vuint8m8_t vs1, + size_t vl) { + return __riscv_vrol_vv_u8m8_m(vm, vs2, vs1, vl); +} + +vuint8m8_t test_vrol_vx_u8m8_m(vbool1_t vm, vuint8m8_t vs2, size_t rs1, + size_t vl) { + return __riscv_vrol_vx_u8m8_m(vm, vs2, rs1, vl); +} + +vuint16mf4_t test_vrol_vv_u16mf4_m(vbool64_t vm, vuint16mf4_t vs2, + 
vuint16mf4_t vs1, size_t vl) { + return __riscv_vrol_vv_u16mf4_m(vm, vs2, vs1, vl); +} + +vuint16mf4_t test_vrol_vx_u16mf4_m(vbool64_t vm, vuint16mf4_t vs2, size_t rs1, + size_t vl) { + return __riscv_vrol_vx_u16mf4_m(vm, vs2, rs1, vl); +} + +vuint16mf2_t test_vrol_vv_u16mf2_m(vbool32_t vm, vuint16mf2_t vs2, + vuint16mf2_t vs1, size_t vl) { + return __riscv_vrol_vv_u16mf2_m(vm, vs2, vs1, vl); +} + +vuint16mf2_t test_vrol_vx_u16mf2_m(vbool32_t vm, vuint16mf2_t vs2, size_t rs1, + size_t vl) { + return __riscv_vrol_vx_u16mf2_m(vm, vs2, rs1, vl); +} + +vuint16m1_t test_vrol_vv_u16m1_m(vbool16_t vm, vuint16m1_t vs2, vuint16m1_t vs1, + size_t vl) { + return __riscv_vrol_vv_u16m1_m(vm, vs2, vs1, vl); +} + +vuint16m1_t test_vrol_vx_u16m1_m(vbool16_t vm, vuint16m1_t vs2, size_t rs1, + size_t vl) { + return __riscv_vrol_vx_u16m1_m(vm, vs2, rs1, vl); +} + +vuint16m2_t test_vrol_vv_u16m2_m(vbool8_t vm, vuint16m2_t vs2, vuint16m2_t vs1, + size_t vl) { + return __riscv_vrol_vv_u16m2_m(vm, vs2, vs1, vl); +} + +vuint16m2_t test_vrol_vx_u16m2_m(vbool8_t vm, vuint16m2_t vs2, size_t rs1, + size_t vl) { + return __riscv_vrol_vx_u16m2_m(vm, vs2, rs1, vl); +} + +vuint16m4_t test_vrol_vv_u16m4_m(vbool4_t vm, vuint16m4_t vs2, vuint16m4_t vs1, + size_t vl) { + return __riscv_vrol_vv_u16m4_m(vm, vs2, vs1, vl); +} + +vuint16m4_t test_vrol_vx_u16m4_m(vbool4_t vm, vuint16m4_t vs2, size_t rs1, + size_t vl) { + return __riscv_vrol_vx_u16m4_m(vm, vs2, rs1, vl); +} + +vuint16m8_t test_vrol_vv_u16m8_m(vbool2_t vm, vuint16m8_t vs2, vuint16m8_t vs1, + size_t vl) { + return __riscv_vrol_vv_u16m8_m(vm, vs2, vs1, vl); +} + +vuint16m8_t test_vrol_vx_u16m8_m(vbool2_t vm, vuint16m8_t vs2, size_t rs1, + size_t vl) { + return __riscv_vrol_vx_u16m8_m(vm, vs2, rs1, vl); +} + +vuint32mf2_t test_vrol_vv_u32mf2_m(vbool64_t vm, vuint32mf2_t vs2, + vuint32mf2_t vs1, size_t vl) { + return __riscv_vrol_vv_u32mf2_m(vm, vs2, vs1, vl); +} + +vuint32mf2_t test_vrol_vx_u32mf2_m(vbool64_t vm, vuint32mf2_t vs2, size_t rs1, + size_t vl) { + return __riscv_vrol_vx_u32mf2_m(vm, vs2, rs1, vl); +} + +vuint32m1_t test_vrol_vv_u32m1_m(vbool32_t vm, vuint32m1_t vs2, vuint32m1_t vs1, + size_t vl) { + return __riscv_vrol_vv_u32m1_m(vm, vs2, vs1, vl); +} + +vuint32m1_t test_vrol_vx_u32m1_m(vbool32_t vm, vuint32m1_t vs2, size_t rs1, + size_t vl) { + return __riscv_vrol_vx_u32m1_m(vm, vs2, rs1, vl); +} + +vuint32m2_t test_vrol_vv_u32m2_m(vbool16_t vm, vuint32m2_t vs2, vuint32m2_t vs1, + size_t vl) { + return __riscv_vrol_vv_u32m2_m(vm, vs2, vs1, vl); +} + +vuint32m2_t test_vrol_vx_u32m2_m(vbool16_t vm, vuint32m2_t vs2, size_t rs1, + size_t vl) { + return __riscv_vrol_vx_u32m2_m(vm, vs2, rs1, vl); +} + +vuint32m4_t test_vrol_vv_u32m4_m(vbool8_t vm, vuint32m4_t vs2, vuint32m4_t vs1, + size_t vl) { + return __riscv_vrol_vv_u32m4_m(vm, vs2, vs1, vl); +} + +vuint32m4_t test_vrol_vx_u32m4_m(vbool8_t vm, vuint32m4_t vs2, size_t rs1, + size_t vl) { + return __riscv_vrol_vx_u32m4_m(vm, vs2, rs1, vl); +} + +vuint32m8_t test_vrol_vv_u32m8_m(vbool4_t vm, vuint32m8_t vs2, vuint32m8_t vs1, + size_t vl) { + return __riscv_vrol_vv_u32m8_m(vm, vs2, vs1, vl); +} + +vuint32m8_t test_vrol_vx_u32m8_m(vbool4_t vm, vuint32m8_t vs2, size_t rs1, + size_t vl) { + return __riscv_vrol_vx_u32m8_m(vm, vs2, rs1, vl); +} + +vuint64m1_t test_vrol_vv_u64m1_m(vbool64_t vm, vuint64m1_t vs2, vuint64m1_t vs1, + size_t vl) { + return __riscv_vrol_vv_u64m1_m(vm, vs2, vs1, vl); +} + +vuint64m1_t test_vrol_vx_u64m1_m(vbool64_t vm, vuint64m1_t vs2, size_t rs1, + size_t vl) { + return 
__riscv_vrol_vx_u64m1_m(vm, vs2, rs1, vl); +} + +vuint64m2_t test_vrol_vv_u64m2_m(vbool32_t vm, vuint64m2_t vs2, vuint64m2_t vs1, + size_t vl) { + return __riscv_vrol_vv_u64m2_m(vm, vs2, vs1, vl); +} + +vuint64m2_t test_vrol_vx_u64m2_m(vbool32_t vm, vuint64m2_t vs2, size_t rs1, + size_t vl) { + return __riscv_vrol_vx_u64m2_m(vm, vs2, rs1, vl); +} + +vuint64m4_t test_vrol_vv_u64m4_m(vbool16_t vm, vuint64m4_t vs2, vuint64m4_t vs1, + size_t vl) { + return __riscv_vrol_vv_u64m4_m(vm, vs2, vs1, vl); +} + +vuint64m4_t test_vrol_vx_u64m4_m(vbool16_t vm, vuint64m4_t vs2, size_t rs1, + size_t vl) { + return __riscv_vrol_vx_u64m4_m(vm, vs2, rs1, vl); +} + +vuint64m8_t test_vrol_vv_u64m8_m(vbool8_t vm, vuint64m8_t vs2, vuint64m8_t vs1, + size_t vl) { + return __riscv_vrol_vv_u64m8_m(vm, vs2, vs1, vl); +} + +vuint64m8_t test_vrol_vx_u64m8_m(vbool8_t vm, vuint64m8_t vs2, size_t rs1, + size_t vl) { + return __riscv_vrol_vx_u64m8_m(vm, vs2, rs1, vl); +} diff --git a/auto-generated/vector-crypto/llvm-api-tests/vror.c b/auto-generated/vector-crypto/llvm-api-tests/vror.c new file mode 100644 index 000000000..073c1fe05 --- /dev/null +++ b/auto-generated/vector-crypto/llvm-api-tests/vror.c @@ -0,0 +1,412 @@ +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zvl512b \ +// RUN: -target-feature +zvbb \ +// RUN: -target-feature +zvbc \ +// RUN: -target-feature +zvkg \ +// RUN: -target-feature +zvkned \ +// RUN: -target-feature +zvknhb \ +// RUN: -target-feature +zvksed \ +// RUN: -target-feature +zvksh -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include + +vuint8mf8_t test_vror_vv_u8mf8(vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { + return __riscv_vror_vv_u8mf8(vs2, vs1, vl); +} + +vuint8mf8_t test_vror_vx_u8mf8(vuint8mf8_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u8mf8(vs2, rs1, vl); +} + +vuint8mf4_t test_vror_vv_u8mf4(vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { + return __riscv_vror_vv_u8mf4(vs2, vs1, vl); +} + +vuint8mf4_t test_vror_vx_u8mf4(vuint8mf4_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u8mf4(vs2, rs1, vl); +} + +vuint8mf2_t test_vror_vv_u8mf2(vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { + return __riscv_vror_vv_u8mf2(vs2, vs1, vl); +} + +vuint8mf2_t test_vror_vx_u8mf2(vuint8mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u8mf2(vs2, rs1, vl); +} + +vuint8m1_t test_vror_vv_u8m1(vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vror_vv_u8m1(vs2, vs1, vl); +} + +vuint8m1_t test_vror_vx_u8m1(vuint8m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u8m1(vs2, rs1, vl); +} + +vuint8m2_t test_vror_vv_u8m2(vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) { + return __riscv_vror_vv_u8m2(vs2, vs1, vl); +} + +vuint8m2_t test_vror_vx_u8m2(vuint8m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u8m2(vs2, rs1, vl); +} + +vuint8m4_t test_vror_vv_u8m4(vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) { + return __riscv_vror_vv_u8m4(vs2, vs1, vl); +} + +vuint8m4_t test_vror_vx_u8m4(vuint8m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u8m4(vs2, rs1, vl); +} + +vuint8m8_t test_vror_vv_u8m8(vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) { + return __riscv_vror_vv_u8m8(vs2, vs1, vl); +} + +vuint8m8_t test_vror_vx_u8m8(vuint8m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u8m8(vs2, rs1, vl); +} + +vuint16mf4_t test_vror_vv_u16mf4(vuint16mf4_t vs2, vuint16mf4_t vs1, + size_t vl) { + return 
__riscv_vror_vv_u16mf4(vs2, vs1, vl); +} + +vuint16mf4_t test_vror_vx_u16mf4(vuint16mf4_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u16mf4(vs2, rs1, vl); +} + +vuint16mf2_t test_vror_vv_u16mf2(vuint16mf2_t vs2, vuint16mf2_t vs1, + size_t vl) { + return __riscv_vror_vv_u16mf2(vs2, vs1, vl); +} + +vuint16mf2_t test_vror_vx_u16mf2(vuint16mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u16mf2(vs2, rs1, vl); +} + +vuint16m1_t test_vror_vv_u16m1(vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vror_vv_u16m1(vs2, vs1, vl); +} + +vuint16m1_t test_vror_vx_u16m1(vuint16m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u16m1(vs2, rs1, vl); +} + +vuint16m2_t test_vror_vv_u16m2(vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { + return __riscv_vror_vv_u16m2(vs2, vs1, vl); +} + +vuint16m2_t test_vror_vx_u16m2(vuint16m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u16m2(vs2, rs1, vl); +} + +vuint16m4_t test_vror_vv_u16m4(vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { + return __riscv_vror_vv_u16m4(vs2, vs1, vl); +} + +vuint16m4_t test_vror_vx_u16m4(vuint16m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u16m4(vs2, rs1, vl); +} + +vuint16m8_t test_vror_vv_u16m8(vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) { + return __riscv_vror_vv_u16m8(vs2, vs1, vl); +} + +vuint16m8_t test_vror_vx_u16m8(vuint16m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u16m8(vs2, rs1, vl); +} + +vuint32mf2_t test_vror_vv_u32mf2(vuint32mf2_t vs2, vuint32mf2_t vs1, + size_t vl) { + return __riscv_vror_vv_u32mf2(vs2, vs1, vl); +} + +vuint32mf2_t test_vror_vx_u32mf2(vuint32mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u32mf2(vs2, rs1, vl); +} + +vuint32m1_t test_vror_vv_u32m1(vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vror_vv_u32m1(vs2, vs1, vl); +} + +vuint32m1_t test_vror_vx_u32m1(vuint32m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u32m1(vs2, rs1, vl); +} + +vuint32m2_t test_vror_vv_u32m2(vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { + return __riscv_vror_vv_u32m2(vs2, vs1, vl); +} + +vuint32m2_t test_vror_vx_u32m2(vuint32m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u32m2(vs2, rs1, vl); +} + +vuint32m4_t test_vror_vv_u32m4(vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { + return __riscv_vror_vv_u32m4(vs2, vs1, vl); +} + +vuint32m4_t test_vror_vx_u32m4(vuint32m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u32m4(vs2, rs1, vl); +} + +vuint32m8_t test_vror_vv_u32m8(vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) { + return __riscv_vror_vv_u32m8(vs2, vs1, vl); +} + +vuint32m8_t test_vror_vx_u32m8(vuint32m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u32m8(vs2, rs1, vl); +} + +vuint64m1_t test_vror_vv_u64m1(vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vror_vv_u64m1(vs2, vs1, vl); +} + +vuint64m1_t test_vror_vx_u64m1(vuint64m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u64m1(vs2, rs1, vl); +} + +vuint64m2_t test_vror_vv_u64m2(vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { + return __riscv_vror_vv_u64m2(vs2, vs1, vl); +} + +vuint64m2_t test_vror_vx_u64m2(vuint64m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u64m2(vs2, rs1, vl); +} + +vuint64m4_t test_vror_vv_u64m4(vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { + return __riscv_vror_vv_u64m4(vs2, vs1, vl); +} + +vuint64m4_t test_vror_vx_u64m4(vuint64m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u64m4(vs2, rs1, vl); +} + +vuint64m8_t test_vror_vv_u64m8(vuint64m8_t 
vs2, vuint64m8_t vs1, size_t vl) { + return __riscv_vror_vv_u64m8(vs2, vs1, vl); +} + +vuint64m8_t test_vror_vx_u64m8(vuint64m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u64m8(vs2, rs1, vl); +} + +vuint8mf8_t test_vror_vv_u8mf8_m(vbool64_t vm, vuint8mf8_t vs2, vuint8mf8_t vs1, + size_t vl) { + return __riscv_vror_vv_u8mf8_m(vm, vs2, vs1, vl); +} + +vuint8mf8_t test_vror_vx_u8mf8_m(vbool64_t vm, vuint8mf8_t vs2, size_t rs1, + size_t vl) { + return __riscv_vror_vx_u8mf8_m(vm, vs2, rs1, vl); +} + +vuint8mf4_t test_vror_vv_u8mf4_m(vbool32_t vm, vuint8mf4_t vs2, vuint8mf4_t vs1, + size_t vl) { + return __riscv_vror_vv_u8mf4_m(vm, vs2, vs1, vl); +} + +vuint8mf4_t test_vror_vx_u8mf4_m(vbool32_t vm, vuint8mf4_t vs2, size_t rs1, + size_t vl) { + return __riscv_vror_vx_u8mf4_m(vm, vs2, rs1, vl); +} + +vuint8mf2_t test_vror_vv_u8mf2_m(vbool16_t vm, vuint8mf2_t vs2, vuint8mf2_t vs1, + size_t vl) { + return __riscv_vror_vv_u8mf2_m(vm, vs2, vs1, vl); +} + +vuint8mf2_t test_vror_vx_u8mf2_m(vbool16_t vm, vuint8mf2_t vs2, size_t rs1, + size_t vl) { + return __riscv_vror_vx_u8mf2_m(vm, vs2, rs1, vl); +} + +vuint8m1_t test_vror_vv_u8m1_m(vbool8_t vm, vuint8m1_t vs2, vuint8m1_t vs1, + size_t vl) { + return __riscv_vror_vv_u8m1_m(vm, vs2, vs1, vl); +} + +vuint8m1_t test_vror_vx_u8m1_m(vbool8_t vm, vuint8m1_t vs2, size_t rs1, + size_t vl) { + return __riscv_vror_vx_u8m1_m(vm, vs2, rs1, vl); +} + +vuint8m2_t test_vror_vv_u8m2_m(vbool4_t vm, vuint8m2_t vs2, vuint8m2_t vs1, + size_t vl) { + return __riscv_vror_vv_u8m2_m(vm, vs2, vs1, vl); +} + +vuint8m2_t test_vror_vx_u8m2_m(vbool4_t vm, vuint8m2_t vs2, size_t rs1, + size_t vl) { + return __riscv_vror_vx_u8m2_m(vm, vs2, rs1, vl); +} + +vuint8m4_t test_vror_vv_u8m4_m(vbool2_t vm, vuint8m4_t vs2, vuint8m4_t vs1, + size_t vl) { + return __riscv_vror_vv_u8m4_m(vm, vs2, vs1, vl); +} + +vuint8m4_t test_vror_vx_u8m4_m(vbool2_t vm, vuint8m4_t vs2, size_t rs1, + size_t vl) { + return __riscv_vror_vx_u8m4_m(vm, vs2, rs1, vl); +} + +vuint8m8_t test_vror_vv_u8m8_m(vbool1_t vm, vuint8m8_t vs2, vuint8m8_t vs1, + size_t vl) { + return __riscv_vror_vv_u8m8_m(vm, vs2, vs1, vl); +} + +vuint8m8_t test_vror_vx_u8m8_m(vbool1_t vm, vuint8m8_t vs2, size_t rs1, + size_t vl) { + return __riscv_vror_vx_u8m8_m(vm, vs2, rs1, vl); +} + +vuint16mf4_t test_vror_vv_u16mf4_m(vbool64_t vm, vuint16mf4_t vs2, + vuint16mf4_t vs1, size_t vl) { + return __riscv_vror_vv_u16mf4_m(vm, vs2, vs1, vl); +} + +vuint16mf4_t test_vror_vx_u16mf4_m(vbool64_t vm, vuint16mf4_t vs2, size_t rs1, + size_t vl) { + return __riscv_vror_vx_u16mf4_m(vm, vs2, rs1, vl); +} + +vuint16mf2_t test_vror_vv_u16mf2_m(vbool32_t vm, vuint16mf2_t vs2, + vuint16mf2_t vs1, size_t vl) { + return __riscv_vror_vv_u16mf2_m(vm, vs2, vs1, vl); +} + +vuint16mf2_t test_vror_vx_u16mf2_m(vbool32_t vm, vuint16mf2_t vs2, size_t rs1, + size_t vl) { + return __riscv_vror_vx_u16mf2_m(vm, vs2, rs1, vl); +} + +vuint16m1_t test_vror_vv_u16m1_m(vbool16_t vm, vuint16m1_t vs2, vuint16m1_t vs1, + size_t vl) { + return __riscv_vror_vv_u16m1_m(vm, vs2, vs1, vl); +} + +vuint16m1_t test_vror_vx_u16m1_m(vbool16_t vm, vuint16m1_t vs2, size_t rs1, + size_t vl) { + return __riscv_vror_vx_u16m1_m(vm, vs2, rs1, vl); +} + +vuint16m2_t test_vror_vv_u16m2_m(vbool8_t vm, vuint16m2_t vs2, vuint16m2_t vs1, + size_t vl) { + return __riscv_vror_vv_u16m2_m(vm, vs2, vs1, vl); +} + +vuint16m2_t test_vror_vx_u16m2_m(vbool8_t vm, vuint16m2_t vs2, size_t rs1, + size_t vl) { + return __riscv_vror_vx_u16m2_m(vm, vs2, rs1, vl); +} + +vuint16m4_t 
test_vror_vv_u16m4_m(vbool4_t vm, vuint16m4_t vs2, vuint16m4_t vs1, + size_t vl) { + return __riscv_vror_vv_u16m4_m(vm, vs2, vs1, vl); +} + +vuint16m4_t test_vror_vx_u16m4_m(vbool4_t vm, vuint16m4_t vs2, size_t rs1, + size_t vl) { + return __riscv_vror_vx_u16m4_m(vm, vs2, rs1, vl); +} + +vuint16m8_t test_vror_vv_u16m8_m(vbool2_t vm, vuint16m8_t vs2, vuint16m8_t vs1, + size_t vl) { + return __riscv_vror_vv_u16m8_m(vm, vs2, vs1, vl); +} + +vuint16m8_t test_vror_vx_u16m8_m(vbool2_t vm, vuint16m8_t vs2, size_t rs1, + size_t vl) { + return __riscv_vror_vx_u16m8_m(vm, vs2, rs1, vl); +} + +vuint32mf2_t test_vror_vv_u32mf2_m(vbool64_t vm, vuint32mf2_t vs2, + vuint32mf2_t vs1, size_t vl) { + return __riscv_vror_vv_u32mf2_m(vm, vs2, vs1, vl); +} + +vuint32mf2_t test_vror_vx_u32mf2_m(vbool64_t vm, vuint32mf2_t vs2, size_t rs1, + size_t vl) { + return __riscv_vror_vx_u32mf2_m(vm, vs2, rs1, vl); +} + +vuint32m1_t test_vror_vv_u32m1_m(vbool32_t vm, vuint32m1_t vs2, vuint32m1_t vs1, + size_t vl) { + return __riscv_vror_vv_u32m1_m(vm, vs2, vs1, vl); +} + +vuint32m1_t test_vror_vx_u32m1_m(vbool32_t vm, vuint32m1_t vs2, size_t rs1, + size_t vl) { + return __riscv_vror_vx_u32m1_m(vm, vs2, rs1, vl); +} + +vuint32m2_t test_vror_vv_u32m2_m(vbool16_t vm, vuint32m2_t vs2, vuint32m2_t vs1, + size_t vl) { + return __riscv_vror_vv_u32m2_m(vm, vs2, vs1, vl); +} + +vuint32m2_t test_vror_vx_u32m2_m(vbool16_t vm, vuint32m2_t vs2, size_t rs1, + size_t vl) { + return __riscv_vror_vx_u32m2_m(vm, vs2, rs1, vl); +} + +vuint32m4_t test_vror_vv_u32m4_m(vbool8_t vm, vuint32m4_t vs2, vuint32m4_t vs1, + size_t vl) { + return __riscv_vror_vv_u32m4_m(vm, vs2, vs1, vl); +} + +vuint32m4_t test_vror_vx_u32m4_m(vbool8_t vm, vuint32m4_t vs2, size_t rs1, + size_t vl) { + return __riscv_vror_vx_u32m4_m(vm, vs2, rs1, vl); +} + +vuint32m8_t test_vror_vv_u32m8_m(vbool4_t vm, vuint32m8_t vs2, vuint32m8_t vs1, + size_t vl) { + return __riscv_vror_vv_u32m8_m(vm, vs2, vs1, vl); +} + +vuint32m8_t test_vror_vx_u32m8_m(vbool4_t vm, vuint32m8_t vs2, size_t rs1, + size_t vl) { + return __riscv_vror_vx_u32m8_m(vm, vs2, rs1, vl); +} + +vuint64m1_t test_vror_vv_u64m1_m(vbool64_t vm, vuint64m1_t vs2, vuint64m1_t vs1, + size_t vl) { + return __riscv_vror_vv_u64m1_m(vm, vs2, vs1, vl); +} + +vuint64m1_t test_vror_vx_u64m1_m(vbool64_t vm, vuint64m1_t vs2, size_t rs1, + size_t vl) { + return __riscv_vror_vx_u64m1_m(vm, vs2, rs1, vl); +} + +vuint64m2_t test_vror_vv_u64m2_m(vbool32_t vm, vuint64m2_t vs2, vuint64m2_t vs1, + size_t vl) { + return __riscv_vror_vv_u64m2_m(vm, vs2, vs1, vl); +} + +vuint64m2_t test_vror_vx_u64m2_m(vbool32_t vm, vuint64m2_t vs2, size_t rs1, + size_t vl) { + return __riscv_vror_vx_u64m2_m(vm, vs2, rs1, vl); +} + +vuint64m4_t test_vror_vv_u64m4_m(vbool16_t vm, vuint64m4_t vs2, vuint64m4_t vs1, + size_t vl) { + return __riscv_vror_vv_u64m4_m(vm, vs2, vs1, vl); +} + +vuint64m4_t test_vror_vx_u64m4_m(vbool16_t vm, vuint64m4_t vs2, size_t rs1, + size_t vl) { + return __riscv_vror_vx_u64m4_m(vm, vs2, rs1, vl); +} + +vuint64m8_t test_vror_vv_u64m8_m(vbool8_t vm, vuint64m8_t vs2, vuint64m8_t vs1, + size_t vl) { + return __riscv_vror_vv_u64m8_m(vm, vs2, vs1, vl); +} + +vuint64m8_t test_vror_vx_u64m8_m(vbool8_t vm, vuint64m8_t vs2, size_t rs1, + size_t vl) { + return __riscv_vror_vx_u64m8_m(vm, vs2, rs1, vl); +} diff --git a/auto-generated/vector-crypto/llvm-api-tests/vsha2ch.c b/auto-generated/vector-crypto/llvm-api-tests/vsha2ch.c new file mode 100644 index 000000000..78924df94 --- /dev/null +++ 
b/auto-generated/vector-crypto/llvm-api-tests/vsha2ch.c @@ -0,0 +1,58 @@ +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zvl512b \ +// RUN: -target-feature +zvbb \ +// RUN: -target-feature +zvbc \ +// RUN: -target-feature +zvkg \ +// RUN: -target-feature +zvkned \ +// RUN: -target-feature +zvknhb \ +// RUN: -target-feature +zvksed \ +// RUN: -target-feature +zvksh -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include + +vuint32mf2_t test_vsha2ch_vv_u32mf2(vuint32mf2_t vd, vuint32mf2_t vs2, + vuint32mf2_t vs1, size_t vl) { + return __riscv_vsha2ch_vv_u32mf2(vd, vs2, vs1, vl); +} + +vuint32m1_t test_vsha2ch_vv_u32m1(vuint32m1_t vd, vuint32m1_t vs2, + vuint32m1_t vs1, size_t vl) { + return __riscv_vsha2ch_vv_u32m1(vd, vs2, vs1, vl); +} + +vuint32m2_t test_vsha2ch_vv_u32m2(vuint32m2_t vd, vuint32m2_t vs2, + vuint32m2_t vs1, size_t vl) { + return __riscv_vsha2ch_vv_u32m2(vd, vs2, vs1, vl); +} + +vuint32m4_t test_vsha2ch_vv_u32m4(vuint32m4_t vd, vuint32m4_t vs2, + vuint32m4_t vs1, size_t vl) { + return __riscv_vsha2ch_vv_u32m4(vd, vs2, vs1, vl); +} + +vuint32m8_t test_vsha2ch_vv_u32m8(vuint32m8_t vd, vuint32m8_t vs2, + vuint32m8_t vs1, size_t vl) { + return __riscv_vsha2ch_vv_u32m8(vd, vs2, vs1, vl); +} + +vuint64m1_t test_vsha2ch_vv_u64m1(vuint64m1_t vd, vuint64m1_t vs2, + vuint64m1_t vs1, size_t vl) { + return __riscv_vsha2ch_vv_u64m1(vd, vs2, vs1, vl); +} + +vuint64m2_t test_vsha2ch_vv_u64m2(vuint64m2_t vd, vuint64m2_t vs2, + vuint64m2_t vs1, size_t vl) { + return __riscv_vsha2ch_vv_u64m2(vd, vs2, vs1, vl); +} + +vuint64m4_t test_vsha2ch_vv_u64m4(vuint64m4_t vd, vuint64m4_t vs2, + vuint64m4_t vs1, size_t vl) { + return __riscv_vsha2ch_vv_u64m4(vd, vs2, vs1, vl); +} + +vuint64m8_t test_vsha2ch_vv_u64m8(vuint64m8_t vd, vuint64m8_t vs2, + vuint64m8_t vs1, size_t vl) { + return __riscv_vsha2ch_vv_u64m8(vd, vs2, vs1, vl); +} diff --git a/auto-generated/vector-crypto/llvm-api-tests/vsha2cl.c b/auto-generated/vector-crypto/llvm-api-tests/vsha2cl.c new file mode 100644 index 000000000..739a9da5e --- /dev/null +++ b/auto-generated/vector-crypto/llvm-api-tests/vsha2cl.c @@ -0,0 +1,58 @@ +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zvl512b \ +// RUN: -target-feature +zvbb \ +// RUN: -target-feature +zvbc \ +// RUN: -target-feature +zvkg \ +// RUN: -target-feature +zvkned \ +// RUN: -target-feature +zvknhb \ +// RUN: -target-feature +zvksed \ +// RUN: -target-feature +zvksh -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include + +vuint32mf2_t test_vsha2cl_vv_u32mf2(vuint32mf2_t vd, vuint32mf2_t vs2, + vuint32mf2_t vs1, size_t vl) { + return __riscv_vsha2cl_vv_u32mf2(vd, vs2, vs1, vl); +} + +vuint32m1_t test_vsha2cl_vv_u32m1(vuint32m1_t vd, vuint32m1_t vs2, + vuint32m1_t vs1, size_t vl) { + return __riscv_vsha2cl_vv_u32m1(vd, vs2, vs1, vl); +} + +vuint32m2_t test_vsha2cl_vv_u32m2(vuint32m2_t vd, vuint32m2_t vs2, + vuint32m2_t vs1, size_t vl) { + return __riscv_vsha2cl_vv_u32m2(vd, vs2, vs1, vl); +} + +vuint32m4_t test_vsha2cl_vv_u32m4(vuint32m4_t vd, vuint32m4_t vs2, + vuint32m4_t vs1, size_t vl) { + return __riscv_vsha2cl_vv_u32m4(vd, vs2, vs1, vl); +} + +vuint32m8_t test_vsha2cl_vv_u32m8(vuint32m8_t vd, vuint32m8_t vs2, + vuint32m8_t vs1, size_t vl) { + return __riscv_vsha2cl_vv_u32m8(vd, vs2, vs1, vl); +} + 
+vuint64m1_t test_vsha2cl_vv_u64m1(vuint64m1_t vd, vuint64m1_t vs2, + vuint64m1_t vs1, size_t vl) { + return __riscv_vsha2cl_vv_u64m1(vd, vs2, vs1, vl); +} + +vuint64m2_t test_vsha2cl_vv_u64m2(vuint64m2_t vd, vuint64m2_t vs2, + vuint64m2_t vs1, size_t vl) { + return __riscv_vsha2cl_vv_u64m2(vd, vs2, vs1, vl); +} + +vuint64m4_t test_vsha2cl_vv_u64m4(vuint64m4_t vd, vuint64m4_t vs2, + vuint64m4_t vs1, size_t vl) { + return __riscv_vsha2cl_vv_u64m4(vd, vs2, vs1, vl); +} + +vuint64m8_t test_vsha2cl_vv_u64m8(vuint64m8_t vd, vuint64m8_t vs2, + vuint64m8_t vs1, size_t vl) { + return __riscv_vsha2cl_vv_u64m8(vd, vs2, vs1, vl); +} diff --git a/auto-generated/vector-crypto/llvm-api-tests/vsha2ms.c b/auto-generated/vector-crypto/llvm-api-tests/vsha2ms.c new file mode 100644 index 000000000..72201942a --- /dev/null +++ b/auto-generated/vector-crypto/llvm-api-tests/vsha2ms.c @@ -0,0 +1,58 @@ +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zvl512b \ +// RUN: -target-feature +zvbb \ +// RUN: -target-feature +zvbc \ +// RUN: -target-feature +zvkg \ +// RUN: -target-feature +zvkned \ +// RUN: -target-feature +zvknhb \ +// RUN: -target-feature +zvksed \ +// RUN: -target-feature +zvksh -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include + +vuint32mf2_t test_vsha2ms_vv_u32mf2(vuint32mf2_t vd, vuint32mf2_t vs2, + vuint32mf2_t vs1, size_t vl) { + return __riscv_vsha2ms_vv_u32mf2(vd, vs2, vs1, vl); +} + +vuint32m1_t test_vsha2ms_vv_u32m1(vuint32m1_t vd, vuint32m1_t vs2, + vuint32m1_t vs1, size_t vl) { + return __riscv_vsha2ms_vv_u32m1(vd, vs2, vs1, vl); +} + +vuint32m2_t test_vsha2ms_vv_u32m2(vuint32m2_t vd, vuint32m2_t vs2, + vuint32m2_t vs1, size_t vl) { + return __riscv_vsha2ms_vv_u32m2(vd, vs2, vs1, vl); +} + +vuint32m4_t test_vsha2ms_vv_u32m4(vuint32m4_t vd, vuint32m4_t vs2, + vuint32m4_t vs1, size_t vl) { + return __riscv_vsha2ms_vv_u32m4(vd, vs2, vs1, vl); +} + +vuint32m8_t test_vsha2ms_vv_u32m8(vuint32m8_t vd, vuint32m8_t vs2, + vuint32m8_t vs1, size_t vl) { + return __riscv_vsha2ms_vv_u32m8(vd, vs2, vs1, vl); +} + +vuint64m1_t test_vsha2ms_vv_u64m1(vuint64m1_t vd, vuint64m1_t vs2, + vuint64m1_t vs1, size_t vl) { + return __riscv_vsha2ms_vv_u64m1(vd, vs2, vs1, vl); +} + +vuint64m2_t test_vsha2ms_vv_u64m2(vuint64m2_t vd, vuint64m2_t vs2, + vuint64m2_t vs1, size_t vl) { + return __riscv_vsha2ms_vv_u64m2(vd, vs2, vs1, vl); +} + +vuint64m4_t test_vsha2ms_vv_u64m4(vuint64m4_t vd, vuint64m4_t vs2, + vuint64m4_t vs1, size_t vl) { + return __riscv_vsha2ms_vv_u64m4(vd, vs2, vs1, vl); +} + +vuint64m8_t test_vsha2ms_vv_u64m8(vuint64m8_t vd, vuint64m8_t vs2, + vuint64m8_t vs1, size_t vl) { + return __riscv_vsha2ms_vv_u64m8(vd, vs2, vs1, vl); +} diff --git a/auto-generated/vector-crypto/llvm-api-tests/vsm3c.c b/auto-generated/vector-crypto/llvm-api-tests/vsm3c.c new file mode 100644 index 000000000..06ae64701 --- /dev/null +++ b/auto-generated/vector-crypto/llvm-api-tests/vsm3c.c @@ -0,0 +1,34 @@ +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zvl512b \ +// RUN: -target-feature +zvbb \ +// RUN: -target-feature +zvbc \ +// RUN: -target-feature +zvkg \ +// RUN: -target-feature +zvkned \ +// RUN: -target-feature +zvknhb \ +// RUN: -target-feature +zvksed \ +// RUN: -target-feature +zvksh -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s 
+ +#include <riscv_vector.h> + +vuint32mf2_t test_vsm3c_vi_u32mf2(vuint32mf2_t vd, vuint32mf2_t vs2, + size_t vl) { + return __riscv_vsm3c_vi_u32mf2(vd, vs2, 0, vl); +} + +vuint32m1_t test_vsm3c_vi_u32m1(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vsm3c_vi_u32m1(vd, vs2, 0, vl); +} + +vuint32m2_t test_vsm3c_vi_u32m2(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vsm3c_vi_u32m2(vd, vs2, 0, vl); +} + +vuint32m4_t test_vsm3c_vi_u32m4(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vsm3c_vi_u32m4(vd, vs2, 0, vl); +} + +vuint32m8_t test_vsm3c_vi_u32m8(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { + return __riscv_vsm3c_vi_u32m8(vd, vs2, 0, vl); +} diff --git a/auto-generated/vector-crypto/llvm-api-tests/vsm3me.c b/auto-generated/vector-crypto/llvm-api-tests/vsm3me.c new file mode 100644 index 000000000..9aefcd323 --- /dev/null +++ b/auto-generated/vector-crypto/llvm-api-tests/vsm3me.c @@ -0,0 +1,34 @@ +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zvl512b \ +// RUN: -target-feature +zvbb \ +// RUN: -target-feature +zvbc \ +// RUN: -target-feature +zvkg \ +// RUN: -target-feature +zvkned \ +// RUN: -target-feature +zvknhb \ +// RUN: -target-feature +zvksed \ +// RUN: -target-feature +zvksh -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include <riscv_vector.h> + +vuint32mf2_t test_vsm3me_vv_u32mf2(vuint32mf2_t vs2, vuint32mf2_t vs1, + size_t vl) { + return __riscv_vsm3me_vv_u32mf2(vs2, vs1, vl); +} + +vuint32m1_t test_vsm3me_vv_u32m1(vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vsm3me_vv_u32m1(vs2, vs1, vl); +} + +vuint32m2_t test_vsm3me_vv_u32m2(vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { + return __riscv_vsm3me_vv_u32m2(vs2, vs1, vl); +} + +vuint32m4_t test_vsm3me_vv_u32m4(vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { + return __riscv_vsm3me_vv_u32m4(vs2, vs1, vl); +} + +vuint32m8_t test_vsm3me_vv_u32m8(vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) { + return __riscv_vsm3me_vv_u32m8(vs2, vs1, vl); +} diff --git a/auto-generated/vector-crypto/llvm-api-tests/vsm4k.c b/auto-generated/vector-crypto/llvm-api-tests/vsm4k.c new file mode 100644 index 000000000..e5f6bd386 --- /dev/null +++ b/auto-generated/vector-crypto/llvm-api-tests/vsm4k.c @@ -0,0 +1,33 @@ +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zvl512b \ +// RUN: -target-feature +zvbb \ +// RUN: -target-feature +zvbc \ +// RUN: -target-feature +zvkg \ +// RUN: -target-feature +zvkned \ +// RUN: -target-feature +zvknhb \ +// RUN: -target-feature +zvksed \ +// RUN: -target-feature +zvksh -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include <riscv_vector.h> + +vuint32mf2_t test_vsm4k_vi_u32mf2(vuint32mf2_t vs2, size_t vl) { + return __riscv_vsm4k_vi_u32mf2(vs2, 0, vl); +} + +vuint32m1_t test_vsm4k_vi_u32m1(vuint32m1_t vs2, size_t vl) { + return __riscv_vsm4k_vi_u32m1(vs2, 0, vl); +} + +vuint32m2_t test_vsm4k_vi_u32m2(vuint32m2_t vs2, size_t vl) { + return __riscv_vsm4k_vi_u32m2(vs2, 0, vl); +} + +vuint32m4_t test_vsm4k_vi_u32m4(vuint32m4_t vs2, size_t vl) { + return __riscv_vsm4k_vi_u32m4(vs2, 0, vl); +} + +vuint32m8_t test_vsm4k_vi_u32m8(vuint32m8_t vs2, size_t vl) { + return __riscv_vsm4k_vi_u32m8(vs2, 0, vl); +} diff --git a/auto-generated/vector-crypto/llvm-api-tests/vsm4r.c b/auto-generated/vector-crypto/llvm-api-tests/vsm4r.c
new file mode 100644 index 000000000..8119a4331 --- /dev/null +++ b/auto-generated/vector-crypto/llvm-api-tests/vsm4r.c @@ -0,0 +1,104 @@ +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zvl512b \ +// RUN: -target-feature +zvbb \ +// RUN: -target-feature +zvbc \ +// RUN: -target-feature +zvkg \ +// RUN: -target-feature +zvkned \ +// RUN: -target-feature +zvknhb \ +// RUN: -target-feature +zvksed \ +// RUN: -target-feature +zvksh -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include + +vuint32mf2_t test_vsm4r_vv_u32mf2(vuint32mf2_t vd, vuint32mf2_t vs2, + size_t vl) { + return __riscv_vsm4r_vv_u32mf2(vd, vs2, vl); +} + +vuint32mf2_t test_vsm4r_vs_u32mf2_u32mf2(vuint32mf2_t vd, vuint32mf2_t vs2, + size_t vl) { + return __riscv_vsm4r_vs_u32mf2_u32mf2(vd, vs2, vl); +} + +vuint32m1_t test_vsm4r_vs_u32mf2_u32m1(vuint32m1_t vd, vuint32mf2_t vs2, + size_t vl) { + return __riscv_vsm4r_vs_u32mf2_u32m1(vd, vs2, vl); +} + +vuint32m2_t test_vsm4r_vs_u32mf2_u32m2(vuint32m2_t vd, vuint32mf2_t vs2, + size_t vl) { + return __riscv_vsm4r_vs_u32mf2_u32m2(vd, vs2, vl); +} + +vuint32m4_t test_vsm4r_vs_u32mf2_u32m4(vuint32m4_t vd, vuint32mf2_t vs2, + size_t vl) { + return __riscv_vsm4r_vs_u32mf2_u32m4(vd, vs2, vl); +} + +vuint32m8_t test_vsm4r_vs_u32mf2_u32m8(vuint32m8_t vd, vuint32mf2_t vs2, + size_t vl) { + return __riscv_vsm4r_vs_u32mf2_u32m8(vd, vs2, vl); +} + +vuint32m1_t test_vsm4r_vv_u32m1(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vsm4r_vv_u32m1(vd, vs2, vl); +} + +vuint32m1_t test_vsm4r_vs_u32m1_u32m1(vuint32m1_t vd, vuint32m1_t vs2, + size_t vl) { + return __riscv_vsm4r_vs_u32m1_u32m1(vd, vs2, vl); +} + +vuint32m2_t test_vsm4r_vs_u32m1_u32m2(vuint32m2_t vd, vuint32m1_t vs2, + size_t vl) { + return __riscv_vsm4r_vs_u32m1_u32m2(vd, vs2, vl); +} + +vuint32m4_t test_vsm4r_vs_u32m1_u32m4(vuint32m4_t vd, vuint32m1_t vs2, + size_t vl) { + return __riscv_vsm4r_vs_u32m1_u32m4(vd, vs2, vl); +} + +vuint32m8_t test_vsm4r_vs_u32m1_u32m8(vuint32m8_t vd, vuint32m1_t vs2, + size_t vl) { + return __riscv_vsm4r_vs_u32m1_u32m8(vd, vs2, vl); +} + +vuint32m2_t test_vsm4r_vv_u32m2(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vsm4r_vv_u32m2(vd, vs2, vl); +} + +vuint32m2_t test_vsm4r_vs_u32m2_u32m2(vuint32m2_t vd, vuint32m2_t vs2, + size_t vl) { + return __riscv_vsm4r_vs_u32m2_u32m2(vd, vs2, vl); +} + +vuint32m4_t test_vsm4r_vs_u32m2_u32m4(vuint32m4_t vd, vuint32m2_t vs2, + size_t vl) { + return __riscv_vsm4r_vs_u32m2_u32m4(vd, vs2, vl); +} + +vuint32m8_t test_vsm4r_vs_u32m2_u32m8(vuint32m8_t vd, vuint32m2_t vs2, + size_t vl) { + return __riscv_vsm4r_vs_u32m2_u32m8(vd, vs2, vl); +} + +vuint32m4_t test_vsm4r_vv_u32m4(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vsm4r_vv_u32m4(vd, vs2, vl); +} + +vuint32m4_t test_vsm4r_vs_u32m4_u32m4(vuint32m4_t vd, vuint32m4_t vs2, + size_t vl) { + return __riscv_vsm4r_vs_u32m4_u32m4(vd, vs2, vl); +} + +vuint32m8_t test_vsm4r_vs_u32m4_u32m8(vuint32m8_t vd, vuint32m4_t vs2, + size_t vl) { + return __riscv_vsm4r_vs_u32m4_u32m8(vd, vs2, vl); +} + +vuint32m8_t test_vsm4r_vv_u32m8(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { + return __riscv_vsm4r_vv_u32m8(vd, vs2, vl); +} diff --git a/auto-generated/vector-crypto/llvm-api-tests/vwsll.c b/auto-generated/vector-crypto/llvm-api-tests/vwsll.c new file mode 100644 index 000000000..eda2e00d3 --- /dev/null +++ 
b/auto-generated/vector-crypto/llvm-api-tests/vwsll.c @@ -0,0 +1,284 @@ +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zvl512b \ +// RUN: -target-feature +zvbb \ +// RUN: -target-feature +zvbc \ +// RUN: -target-feature +zvkg \ +// RUN: -target-feature +zvkned \ +// RUN: -target-feature +zvknhb \ +// RUN: -target-feature +zvksed \ +// RUN: -target-feature +zvksh -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include + +vuint16mf4_t test_vwsll_vv_u16mf4(vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { + return __riscv_vwsll_vv_u16mf4(vs2, vs1, vl); +} + +vuint16mf4_t test_vwsll_vx_u16mf4(vuint8mf8_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_vx_u16mf4(vs2, rs1, vl); +} + +vuint16mf2_t test_vwsll_vv_u16mf2(vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { + return __riscv_vwsll_vv_u16mf2(vs2, vs1, vl); +} + +vuint16mf2_t test_vwsll_vx_u16mf2(vuint8mf4_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_vx_u16mf2(vs2, rs1, vl); +} + +vuint16m1_t test_vwsll_vv_u16m1(vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { + return __riscv_vwsll_vv_u16m1(vs2, vs1, vl); +} + +vuint16m1_t test_vwsll_vx_u16m1(vuint8mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_vx_u16m1(vs2, rs1, vl); +} + +vuint16m2_t test_vwsll_vv_u16m2(vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vwsll_vv_u16m2(vs2, vs1, vl); +} + +vuint16m2_t test_vwsll_vx_u16m2(vuint8m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_vx_u16m2(vs2, rs1, vl); +} + +vuint16m4_t test_vwsll_vv_u16m4(vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) { + return __riscv_vwsll_vv_u16m4(vs2, vs1, vl); +} + +vuint16m4_t test_vwsll_vx_u16m4(vuint8m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_vx_u16m4(vs2, rs1, vl); +} + +vuint16m8_t test_vwsll_vv_u16m8(vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) { + return __riscv_vwsll_vv_u16m8(vs2, vs1, vl); +} + +vuint16m8_t test_vwsll_vx_u16m8(vuint8m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_vx_u16m8(vs2, rs1, vl); +} + +vuint32mf2_t test_vwsll_vv_u32mf2(vuint16mf4_t vs2, vuint16mf4_t vs1, + size_t vl) { + return __riscv_vwsll_vv_u32mf2(vs2, vs1, vl); +} + +vuint32mf2_t test_vwsll_vx_u32mf2(vuint16mf4_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_vx_u32mf2(vs2, rs1, vl); +} + +vuint32m1_t test_vwsll_vv_u32m1(vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { + return __riscv_vwsll_vv_u32m1(vs2, vs1, vl); +} + +vuint32m1_t test_vwsll_vx_u32m1(vuint16mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_vx_u32m1(vs2, rs1, vl); +} + +vuint32m2_t test_vwsll_vv_u32m2(vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vwsll_vv_u32m2(vs2, vs1, vl); +} + +vuint32m2_t test_vwsll_vx_u32m2(vuint16m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_vx_u32m2(vs2, rs1, vl); +} + +vuint32m4_t test_vwsll_vv_u32m4(vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { + return __riscv_vwsll_vv_u32m4(vs2, vs1, vl); +} + +vuint32m4_t test_vwsll_vx_u32m4(vuint16m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_vx_u32m4(vs2, rs1, vl); +} + +vuint32m8_t test_vwsll_vv_u32m8(vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { + return __riscv_vwsll_vv_u32m8(vs2, vs1, vl); +} + +vuint32m8_t test_vwsll_vx_u32m8(vuint16m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_vx_u32m8(vs2, rs1, vl); +} + +vuint64m1_t test_vwsll_vv_u64m1(vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { + return __riscv_vwsll_vv_u64m1(vs2, 
vs1, vl); +} + +vuint64m1_t test_vwsll_vx_u64m1(vuint32mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_vx_u64m1(vs2, rs1, vl); +} + +vuint64m2_t test_vwsll_vv_u64m2(vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vwsll_vv_u64m2(vs2, vs1, vl); +} + +vuint64m2_t test_vwsll_vx_u64m2(vuint32m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_vx_u64m2(vs2, rs1, vl); +} + +vuint64m4_t test_vwsll_vv_u64m4(vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { + return __riscv_vwsll_vv_u64m4(vs2, vs1, vl); +} + +vuint64m4_t test_vwsll_vx_u64m4(vuint32m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_vx_u64m4(vs2, rs1, vl); +} + +vuint64m8_t test_vwsll_vv_u64m8(vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { + return __riscv_vwsll_vv_u64m8(vs2, vs1, vl); +} + +vuint64m8_t test_vwsll_vx_u64m8(vuint32m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_vx_u64m8(vs2, rs1, vl); +} + +vuint16mf4_t test_vwsll_vv_u16mf4_m(vbool64_t vm, vuint8mf8_t vs2, + vuint8mf8_t vs1, size_t vl) { + return __riscv_vwsll_vv_u16mf4_m(vm, vs2, vs1, vl); +} + +vuint16mf4_t test_vwsll_vx_u16mf4_m(vbool64_t vm, vuint8mf8_t vs2, size_t rs1, + size_t vl) { + return __riscv_vwsll_vx_u16mf4_m(vm, vs2, rs1, vl); +} + +vuint16mf2_t test_vwsll_vv_u16mf2_m(vbool32_t vm, vuint8mf4_t vs2, + vuint8mf4_t vs1, size_t vl) { + return __riscv_vwsll_vv_u16mf2_m(vm, vs2, vs1, vl); +} + +vuint16mf2_t test_vwsll_vx_u16mf2_m(vbool32_t vm, vuint8mf4_t vs2, size_t rs1, + size_t vl) { + return __riscv_vwsll_vx_u16mf2_m(vm, vs2, rs1, vl); +} + +vuint16m1_t test_vwsll_vv_u16m1_m(vbool16_t vm, vuint8mf2_t vs2, + vuint8mf2_t vs1, size_t vl) { + return __riscv_vwsll_vv_u16m1_m(vm, vs2, vs1, vl); +} + +vuint16m1_t test_vwsll_vx_u16m1_m(vbool16_t vm, vuint8mf2_t vs2, size_t rs1, + size_t vl) { + return __riscv_vwsll_vx_u16m1_m(vm, vs2, rs1, vl); +} + +vuint16m2_t test_vwsll_vv_u16m2_m(vbool8_t vm, vuint8m1_t vs2, vuint8m1_t vs1, + size_t vl) { + return __riscv_vwsll_vv_u16m2_m(vm, vs2, vs1, vl); +} + +vuint16m2_t test_vwsll_vx_u16m2_m(vbool8_t vm, vuint8m1_t vs2, size_t rs1, + size_t vl) { + return __riscv_vwsll_vx_u16m2_m(vm, vs2, rs1, vl); +} + +vuint16m4_t test_vwsll_vv_u16m4_m(vbool4_t vm, vuint8m2_t vs2, vuint8m2_t vs1, + size_t vl) { + return __riscv_vwsll_vv_u16m4_m(vm, vs2, vs1, vl); +} + +vuint16m4_t test_vwsll_vx_u16m4_m(vbool4_t vm, vuint8m2_t vs2, size_t rs1, + size_t vl) { + return __riscv_vwsll_vx_u16m4_m(vm, vs2, rs1, vl); +} + +vuint16m8_t test_vwsll_vv_u16m8_m(vbool2_t vm, vuint8m4_t vs2, vuint8m4_t vs1, + size_t vl) { + return __riscv_vwsll_vv_u16m8_m(vm, vs2, vs1, vl); +} + +vuint16m8_t test_vwsll_vx_u16m8_m(vbool2_t vm, vuint8m4_t vs2, size_t rs1, + size_t vl) { + return __riscv_vwsll_vx_u16m8_m(vm, vs2, rs1, vl); +} + +vuint32mf2_t test_vwsll_vv_u32mf2_m(vbool64_t vm, vuint16mf4_t vs2, + vuint16mf4_t vs1, size_t vl) { + return __riscv_vwsll_vv_u32mf2_m(vm, vs2, vs1, vl); +} + +vuint32mf2_t test_vwsll_vx_u32mf2_m(vbool64_t vm, vuint16mf4_t vs2, size_t rs1, + size_t vl) { + return __riscv_vwsll_vx_u32mf2_m(vm, vs2, rs1, vl); +} + +vuint32m1_t test_vwsll_vv_u32m1_m(vbool32_t vm, vuint16mf2_t vs2, + vuint16mf2_t vs1, size_t vl) { + return __riscv_vwsll_vv_u32m1_m(vm, vs2, vs1, vl); +} + +vuint32m1_t test_vwsll_vx_u32m1_m(vbool32_t vm, vuint16mf2_t vs2, size_t rs1, + size_t vl) { + return __riscv_vwsll_vx_u32m1_m(vm, vs2, rs1, vl); +} + +vuint32m2_t test_vwsll_vv_u32m2_m(vbool16_t vm, vuint16m1_t vs2, + vuint16m1_t vs1, size_t vl) { + return __riscv_vwsll_vv_u32m2_m(vm, vs2, vs1, vl); +} + +vuint32m2_t 
test_vwsll_vx_u32m2_m(vbool16_t vm, vuint16m1_t vs2, size_t rs1, + size_t vl) { + return __riscv_vwsll_vx_u32m2_m(vm, vs2, rs1, vl); +} + +vuint32m4_t test_vwsll_vv_u32m4_m(vbool8_t vm, vuint16m2_t vs2, vuint16m2_t vs1, + size_t vl) { + return __riscv_vwsll_vv_u32m4_m(vm, vs2, vs1, vl); +} + +vuint32m4_t test_vwsll_vx_u32m4_m(vbool8_t vm, vuint16m2_t vs2, size_t rs1, + size_t vl) { + return __riscv_vwsll_vx_u32m4_m(vm, vs2, rs1, vl); +} + +vuint32m8_t test_vwsll_vv_u32m8_m(vbool4_t vm, vuint16m4_t vs2, vuint16m4_t vs1, + size_t vl) { + return __riscv_vwsll_vv_u32m8_m(vm, vs2, vs1, vl); +} + +vuint32m8_t test_vwsll_vx_u32m8_m(vbool4_t vm, vuint16m4_t vs2, size_t rs1, + size_t vl) { + return __riscv_vwsll_vx_u32m8_m(vm, vs2, rs1, vl); +} + +vuint64m1_t test_vwsll_vv_u64m1_m(vbool64_t vm, vuint32mf2_t vs2, + vuint32mf2_t vs1, size_t vl) { + return __riscv_vwsll_vv_u64m1_m(vm, vs2, vs1, vl); +} + +vuint64m1_t test_vwsll_vx_u64m1_m(vbool64_t vm, vuint32mf2_t vs2, size_t rs1, + size_t vl) { + return __riscv_vwsll_vx_u64m1_m(vm, vs2, rs1, vl); +} + +vuint64m2_t test_vwsll_vv_u64m2_m(vbool32_t vm, vuint32m1_t vs2, + vuint32m1_t vs1, size_t vl) { + return __riscv_vwsll_vv_u64m2_m(vm, vs2, vs1, vl); +} + +vuint64m2_t test_vwsll_vx_u64m2_m(vbool32_t vm, vuint32m1_t vs2, size_t rs1, + size_t vl) { + return __riscv_vwsll_vx_u64m2_m(vm, vs2, rs1, vl); +} + +vuint64m4_t test_vwsll_vv_u64m4_m(vbool16_t vm, vuint32m2_t vs2, + vuint32m2_t vs1, size_t vl) { + return __riscv_vwsll_vv_u64m4_m(vm, vs2, vs1, vl); +} + +vuint64m4_t test_vwsll_vx_u64m4_m(vbool16_t vm, vuint32m2_t vs2, size_t rs1, + size_t vl) { + return __riscv_vwsll_vx_u64m4_m(vm, vs2, rs1, vl); +} + +vuint64m8_t test_vwsll_vv_u64m8_m(vbool8_t vm, vuint32m4_t vs2, vuint32m4_t vs1, + size_t vl) { + return __riscv_vwsll_vv_u64m8_m(vm, vs2, vs1, vl); +} + +vuint64m8_t test_vwsll_vx_u64m8_m(vbool8_t vm, vuint32m4_t vs2, size_t rs1, + size_t vl) { + return __riscv_vwsll_vx_u64m8_m(vm, vs2, rs1, vl); +} diff --git a/auto-generated/vector-crypto/llvm-overloaded-tests/vaesdf.c b/auto-generated/vector-crypto/llvm-overloaded-tests/vaesdf.c new file mode 100644 index 000000000..83837f66d --- /dev/null +++ b/auto-generated/vector-crypto/llvm-overloaded-tests/vaesdf.c @@ -0,0 +1,104 @@ +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zvl512b \ +// RUN: -target-feature +zvbb \ +// RUN: -target-feature +zvbc \ +// RUN: -target-feature +zvkg \ +// RUN: -target-feature +zvkned \ +// RUN: -target-feature +zvknhb \ +// RUN: -target-feature +zvksed \ +// RUN: -target-feature +zvksh -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include + +vuint32mf2_t test_vaesdf_vv_u32mf2(vuint32mf2_t vd, vuint32mf2_t vs2, + size_t vl) { + return __riscv_vaesdf_vv(vd, vs2, vl); +} + +vuint32mf2_t test_vaesdf_vs_u32mf2_u32mf2(vuint32mf2_t vd, vuint32mf2_t vs2, + size_t vl) { + return __riscv_vaesdf_vs(vd, vs2, vl); +} + +vuint32m1_t test_vaesdf_vs_u32mf2_u32m1(vuint32m1_t vd, vuint32mf2_t vs2, + size_t vl) { + return __riscv_vaesdf_vs(vd, vs2, vl); +} + +vuint32m2_t test_vaesdf_vs_u32mf2_u32m2(vuint32m2_t vd, vuint32mf2_t vs2, + size_t vl) { + return __riscv_vaesdf_vs(vd, vs2, vl); +} + +vuint32m4_t test_vaesdf_vs_u32mf2_u32m4(vuint32m4_t vd, vuint32mf2_t vs2, + size_t vl) { + return __riscv_vaesdf_vs(vd, vs2, vl); +} + +vuint32m8_t test_vaesdf_vs_u32mf2_u32m8(vuint32m8_t vd, vuint32mf2_t vs2, + size_t vl) { + return __riscv_vaesdf_vs(vd, 
vs2, vl); +} + +vuint32m1_t test_vaesdf_vv_u32m1(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vaesdf_vv(vd, vs2, vl); +} + +vuint32m1_t test_vaesdf_vs_u32m1_u32m1(vuint32m1_t vd, vuint32m1_t vs2, + size_t vl) { + return __riscv_vaesdf_vs(vd, vs2, vl); +} + +vuint32m2_t test_vaesdf_vs_u32m1_u32m2(vuint32m2_t vd, vuint32m1_t vs2, + size_t vl) { + return __riscv_vaesdf_vs(vd, vs2, vl); +} + +vuint32m4_t test_vaesdf_vs_u32m1_u32m4(vuint32m4_t vd, vuint32m1_t vs2, + size_t vl) { + return __riscv_vaesdf_vs(vd, vs2, vl); +} + +vuint32m8_t test_vaesdf_vs_u32m1_u32m8(vuint32m8_t vd, vuint32m1_t vs2, + size_t vl) { + return __riscv_vaesdf_vs(vd, vs2, vl); +} + +vuint32m2_t test_vaesdf_vv_u32m2(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vaesdf_vv(vd, vs2, vl); +} + +vuint32m2_t test_vaesdf_vs_u32m2_u32m2(vuint32m2_t vd, vuint32m2_t vs2, + size_t vl) { + return __riscv_vaesdf_vs(vd, vs2, vl); +} + +vuint32m4_t test_vaesdf_vs_u32m2_u32m4(vuint32m4_t vd, vuint32m2_t vs2, + size_t vl) { + return __riscv_vaesdf_vs(vd, vs2, vl); +} + +vuint32m8_t test_vaesdf_vs_u32m2_u32m8(vuint32m8_t vd, vuint32m2_t vs2, + size_t vl) { + return __riscv_vaesdf_vs(vd, vs2, vl); +} + +vuint32m4_t test_vaesdf_vv_u32m4(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vaesdf_vv(vd, vs2, vl); +} + +vuint32m4_t test_vaesdf_vs_u32m4_u32m4(vuint32m4_t vd, vuint32m4_t vs2, + size_t vl) { + return __riscv_vaesdf_vs(vd, vs2, vl); +} + +vuint32m8_t test_vaesdf_vs_u32m4_u32m8(vuint32m8_t vd, vuint32m4_t vs2, + size_t vl) { + return __riscv_vaesdf_vs(vd, vs2, vl); +} + +vuint32m8_t test_vaesdf_vv_u32m8(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { + return __riscv_vaesdf_vv(vd, vs2, vl); +} diff --git a/auto-generated/vector-crypto/llvm-overloaded-tests/vaesdm.c b/auto-generated/vector-crypto/llvm-overloaded-tests/vaesdm.c new file mode 100644 index 000000000..6bc6faa5b --- /dev/null +++ b/auto-generated/vector-crypto/llvm-overloaded-tests/vaesdm.c @@ -0,0 +1,104 @@ +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zvl512b \ +// RUN: -target-feature +zvbb \ +// RUN: -target-feature +zvbc \ +// RUN: -target-feature +zvkg \ +// RUN: -target-feature +zvkned \ +// RUN: -target-feature +zvknhb \ +// RUN: -target-feature +zvksed \ +// RUN: -target-feature +zvksh -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include + +vuint32mf2_t test_vaesdm_vv_u32mf2(vuint32mf2_t vd, vuint32mf2_t vs2, + size_t vl) { + return __riscv_vaesdm_vv(vd, vs2, vl); +} + +vuint32mf2_t test_vaesdm_vs_u32mf2_u32mf2(vuint32mf2_t vd, vuint32mf2_t vs2, + size_t vl) { + return __riscv_vaesdm_vs(vd, vs2, vl); +} + +vuint32m1_t test_vaesdm_vs_u32mf2_u32m1(vuint32m1_t vd, vuint32mf2_t vs2, + size_t vl) { + return __riscv_vaesdm_vs(vd, vs2, vl); +} + +vuint32m2_t test_vaesdm_vs_u32mf2_u32m2(vuint32m2_t vd, vuint32mf2_t vs2, + size_t vl) { + return __riscv_vaesdm_vs(vd, vs2, vl); +} + +vuint32m4_t test_vaesdm_vs_u32mf2_u32m4(vuint32m4_t vd, vuint32mf2_t vs2, + size_t vl) { + return __riscv_vaesdm_vs(vd, vs2, vl); +} + +vuint32m8_t test_vaesdm_vs_u32mf2_u32m8(vuint32m8_t vd, vuint32mf2_t vs2, + size_t vl) { + return __riscv_vaesdm_vs(vd, vs2, vl); +} + +vuint32m1_t test_vaesdm_vv_u32m1(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vaesdm_vv(vd, vs2, vl); +} + +vuint32m1_t test_vaesdm_vs_u32m1_u32m1(vuint32m1_t vd, vuint32m1_t vs2, + size_t vl) { + return 
__riscv_vaesdm_vs(vd, vs2, vl); +} + +vuint32m2_t test_vaesdm_vs_u32m1_u32m2(vuint32m2_t vd, vuint32m1_t vs2, + size_t vl) { + return __riscv_vaesdm_vs(vd, vs2, vl); +} + +vuint32m4_t test_vaesdm_vs_u32m1_u32m4(vuint32m4_t vd, vuint32m1_t vs2, + size_t vl) { + return __riscv_vaesdm_vs(vd, vs2, vl); +} + +vuint32m8_t test_vaesdm_vs_u32m1_u32m8(vuint32m8_t vd, vuint32m1_t vs2, + size_t vl) { + return __riscv_vaesdm_vs(vd, vs2, vl); +} + +vuint32m2_t test_vaesdm_vv_u32m2(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vaesdm_vv(vd, vs2, vl); +} + +vuint32m2_t test_vaesdm_vs_u32m2_u32m2(vuint32m2_t vd, vuint32m2_t vs2, + size_t vl) { + return __riscv_vaesdm_vs(vd, vs2, vl); +} + +vuint32m4_t test_vaesdm_vs_u32m2_u32m4(vuint32m4_t vd, vuint32m2_t vs2, + size_t vl) { + return __riscv_vaesdm_vs(vd, vs2, vl); +} + +vuint32m8_t test_vaesdm_vs_u32m2_u32m8(vuint32m8_t vd, vuint32m2_t vs2, + size_t vl) { + return __riscv_vaesdm_vs(vd, vs2, vl); +} + +vuint32m4_t test_vaesdm_vv_u32m4(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vaesdm_vv(vd, vs2, vl); +} + +vuint32m4_t test_vaesdm_vs_u32m4_u32m4(vuint32m4_t vd, vuint32m4_t vs2, + size_t vl) { + return __riscv_vaesdm_vs(vd, vs2, vl); +} + +vuint32m8_t test_vaesdm_vs_u32m4_u32m8(vuint32m8_t vd, vuint32m4_t vs2, + size_t vl) { + return __riscv_vaesdm_vs(vd, vs2, vl); +} + +vuint32m8_t test_vaesdm_vv_u32m8(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { + return __riscv_vaesdm_vv(vd, vs2, vl); +} diff --git a/auto-generated/vector-crypto/llvm-overloaded-tests/vaesef.c b/auto-generated/vector-crypto/llvm-overloaded-tests/vaesef.c new file mode 100644 index 000000000..a42aac84e --- /dev/null +++ b/auto-generated/vector-crypto/llvm-overloaded-tests/vaesef.c @@ -0,0 +1,104 @@ +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zvl512b \ +// RUN: -target-feature +zvbb \ +// RUN: -target-feature +zvbc \ +// RUN: -target-feature +zvkg \ +// RUN: -target-feature +zvkned \ +// RUN: -target-feature +zvknhb \ +// RUN: -target-feature +zvksed \ +// RUN: -target-feature +zvksh -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include + +vuint32mf2_t test_vaesef_vv_u32mf2(vuint32mf2_t vd, vuint32mf2_t vs2, + size_t vl) { + return __riscv_vaesef_vv(vd, vs2, vl); +} + +vuint32mf2_t test_vaesef_vs_u32mf2_u32mf2(vuint32mf2_t vd, vuint32mf2_t vs2, + size_t vl) { + return __riscv_vaesef_vs(vd, vs2, vl); +} + +vuint32m1_t test_vaesef_vs_u32mf2_u32m1(vuint32m1_t vd, vuint32mf2_t vs2, + size_t vl) { + return __riscv_vaesef_vs(vd, vs2, vl); +} + +vuint32m2_t test_vaesef_vs_u32mf2_u32m2(vuint32m2_t vd, vuint32mf2_t vs2, + size_t vl) { + return __riscv_vaesef_vs(vd, vs2, vl); +} + +vuint32m4_t test_vaesef_vs_u32mf2_u32m4(vuint32m4_t vd, vuint32mf2_t vs2, + size_t vl) { + return __riscv_vaesef_vs(vd, vs2, vl); +} + +vuint32m8_t test_vaesef_vs_u32mf2_u32m8(vuint32m8_t vd, vuint32mf2_t vs2, + size_t vl) { + return __riscv_vaesef_vs(vd, vs2, vl); +} + +vuint32m1_t test_vaesef_vv_u32m1(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vaesef_vv(vd, vs2, vl); +} + +vuint32m1_t test_vaesef_vs_u32m1_u32m1(vuint32m1_t vd, vuint32m1_t vs2, + size_t vl) { + return __riscv_vaesef_vs(vd, vs2, vl); +} + +vuint32m2_t test_vaesef_vs_u32m1_u32m2(vuint32m2_t vd, vuint32m1_t vs2, + size_t vl) { + return __riscv_vaesef_vs(vd, vs2, vl); +} + +vuint32m4_t test_vaesef_vs_u32m1_u32m4(vuint32m4_t vd, vuint32m1_t vs2, + size_t 
vl) { + return __riscv_vaesef_vs(vd, vs2, vl); +} + +vuint32m8_t test_vaesef_vs_u32m1_u32m8(vuint32m8_t vd, vuint32m1_t vs2, + size_t vl) { + return __riscv_vaesef_vs(vd, vs2, vl); +} + +vuint32m2_t test_vaesef_vv_u32m2(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vaesef_vv(vd, vs2, vl); +} + +vuint32m2_t test_vaesef_vs_u32m2_u32m2(vuint32m2_t vd, vuint32m2_t vs2, + size_t vl) { + return __riscv_vaesef_vs(vd, vs2, vl); +} + +vuint32m4_t test_vaesef_vs_u32m2_u32m4(vuint32m4_t vd, vuint32m2_t vs2, + size_t vl) { + return __riscv_vaesef_vs(vd, vs2, vl); +} + +vuint32m8_t test_vaesef_vs_u32m2_u32m8(vuint32m8_t vd, vuint32m2_t vs2, + size_t vl) { + return __riscv_vaesef_vs(vd, vs2, vl); +} + +vuint32m4_t test_vaesef_vv_u32m4(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vaesef_vv(vd, vs2, vl); +} + +vuint32m4_t test_vaesef_vs_u32m4_u32m4(vuint32m4_t vd, vuint32m4_t vs2, + size_t vl) { + return __riscv_vaesef_vs(vd, vs2, vl); +} + +vuint32m8_t test_vaesef_vs_u32m4_u32m8(vuint32m8_t vd, vuint32m4_t vs2, + size_t vl) { + return __riscv_vaesef_vs(vd, vs2, vl); +} + +vuint32m8_t test_vaesef_vv_u32m8(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { + return __riscv_vaesef_vv(vd, vs2, vl); +} diff --git a/auto-generated/vector-crypto/llvm-overloaded-tests/vaesem.c b/auto-generated/vector-crypto/llvm-overloaded-tests/vaesem.c new file mode 100644 index 000000000..2cb5113a7 --- /dev/null +++ b/auto-generated/vector-crypto/llvm-overloaded-tests/vaesem.c @@ -0,0 +1,104 @@ +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zvl512b \ +// RUN: -target-feature +zvbb \ +// RUN: -target-feature +zvbc \ +// RUN: -target-feature +zvkg \ +// RUN: -target-feature +zvkned \ +// RUN: -target-feature +zvknhb \ +// RUN: -target-feature +zvksed \ +// RUN: -target-feature +zvksh -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include + +vuint32mf2_t test_vaesem_vv_u32mf2(vuint32mf2_t vd, vuint32mf2_t vs2, + size_t vl) { + return __riscv_vaesem_vv(vd, vs2, vl); +} + +vuint32mf2_t test_vaesem_vs_u32mf2_u32mf2(vuint32mf2_t vd, vuint32mf2_t vs2, + size_t vl) { + return __riscv_vaesem_vs(vd, vs2, vl); +} + +vuint32m1_t test_vaesem_vs_u32mf2_u32m1(vuint32m1_t vd, vuint32mf2_t vs2, + size_t vl) { + return __riscv_vaesem_vs(vd, vs2, vl); +} + +vuint32m2_t test_vaesem_vs_u32mf2_u32m2(vuint32m2_t vd, vuint32mf2_t vs2, + size_t vl) { + return __riscv_vaesem_vs(vd, vs2, vl); +} + +vuint32m4_t test_vaesem_vs_u32mf2_u32m4(vuint32m4_t vd, vuint32mf2_t vs2, + size_t vl) { + return __riscv_vaesem_vs(vd, vs2, vl); +} + +vuint32m8_t test_vaesem_vs_u32mf2_u32m8(vuint32m8_t vd, vuint32mf2_t vs2, + size_t vl) { + return __riscv_vaesem_vs(vd, vs2, vl); +} + +vuint32m1_t test_vaesem_vv_u32m1(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vaesem_vv(vd, vs2, vl); +} + +vuint32m1_t test_vaesem_vs_u32m1_u32m1(vuint32m1_t vd, vuint32m1_t vs2, + size_t vl) { + return __riscv_vaesem_vs(vd, vs2, vl); +} + +vuint32m2_t test_vaesem_vs_u32m1_u32m2(vuint32m2_t vd, vuint32m1_t vs2, + size_t vl) { + return __riscv_vaesem_vs(vd, vs2, vl); +} + +vuint32m4_t test_vaesem_vs_u32m1_u32m4(vuint32m4_t vd, vuint32m1_t vs2, + size_t vl) { + return __riscv_vaesem_vs(vd, vs2, vl); +} + +vuint32m8_t test_vaesem_vs_u32m1_u32m8(vuint32m8_t vd, vuint32m1_t vs2, + size_t vl) { + return __riscv_vaesem_vs(vd, vs2, vl); +} + +vuint32m2_t test_vaesem_vv_u32m2(vuint32m2_t vd, vuint32m2_t vs2, 
size_t vl) { + return __riscv_vaesem_vv(vd, vs2, vl); +} + +vuint32m2_t test_vaesem_vs_u32m2_u32m2(vuint32m2_t vd, vuint32m2_t vs2, + size_t vl) { + return __riscv_vaesem_vs(vd, vs2, vl); +} + +vuint32m4_t test_vaesem_vs_u32m2_u32m4(vuint32m4_t vd, vuint32m2_t vs2, + size_t vl) { + return __riscv_vaesem_vs(vd, vs2, vl); +} + +vuint32m8_t test_vaesem_vs_u32m2_u32m8(vuint32m8_t vd, vuint32m2_t vs2, + size_t vl) { + return __riscv_vaesem_vs(vd, vs2, vl); +} + +vuint32m4_t test_vaesem_vv_u32m4(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vaesem_vv(vd, vs2, vl); +} + +vuint32m4_t test_vaesem_vs_u32m4_u32m4(vuint32m4_t vd, vuint32m4_t vs2, + size_t vl) { + return __riscv_vaesem_vs(vd, vs2, vl); +} + +vuint32m8_t test_vaesem_vs_u32m4_u32m8(vuint32m8_t vd, vuint32m4_t vs2, + size_t vl) { + return __riscv_vaesem_vs(vd, vs2, vl); +} + +vuint32m8_t test_vaesem_vv_u32m8(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { + return __riscv_vaesem_vv(vd, vs2, vl); +} diff --git a/auto-generated/vector-crypto/llvm-overloaded-tests/vaeskf1.c b/auto-generated/vector-crypto/llvm-overloaded-tests/vaeskf1.c new file mode 100644 index 000000000..393a2329f --- /dev/null +++ b/auto-generated/vector-crypto/llvm-overloaded-tests/vaeskf1.c @@ -0,0 +1,33 @@ +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zvl512b \ +// RUN: -target-feature +zvbb \ +// RUN: -target-feature +zvbc \ +// RUN: -target-feature +zvkg \ +// RUN: -target-feature +zvkned \ +// RUN: -target-feature +zvknhb \ +// RUN: -target-feature +zvksed \ +// RUN: -target-feature +zvksh -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include <riscv_vector.h> + +vuint32mf2_t test_vaeskf1_vi_u32mf2(vuint32mf2_t vs2, size_t vl) { + return __riscv_vaeskf1(vs2, 0, vl); +} + +vuint32m1_t test_vaeskf1_vi_u32m1(vuint32m1_t vs2, size_t vl) { + return __riscv_vaeskf1(vs2, 0, vl); +} + +vuint32m2_t test_vaeskf1_vi_u32m2(vuint32m2_t vs2, size_t vl) { + return __riscv_vaeskf1(vs2, 0, vl); +} + +vuint32m4_t test_vaeskf1_vi_u32m4(vuint32m4_t vs2, size_t vl) { + return __riscv_vaeskf1(vs2, 0, vl); +} + +vuint32m8_t test_vaeskf1_vi_u32m8(vuint32m8_t vs2, size_t vl) { + return __riscv_vaeskf1(vs2, 0, vl); +} diff --git a/auto-generated/vector-crypto/llvm-overloaded-tests/vaeskf2.c b/auto-generated/vector-crypto/llvm-overloaded-tests/vaeskf2.c new file mode 100644 index 000000000..e1d85453a --- /dev/null +++ b/auto-generated/vector-crypto/llvm-overloaded-tests/vaeskf2.c @@ -0,0 +1,34 @@ +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zvl512b \ +// RUN: -target-feature +zvbb \ +// RUN: -target-feature +zvbc \ +// RUN: -target-feature +zvkg \ +// RUN: -target-feature +zvkned \ +// RUN: -target-feature +zvknhb \ +// RUN: -target-feature +zvksed \ +// RUN: -target-feature +zvksh -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include <riscv_vector.h> + +vuint32mf2_t test_vaeskf2_vi_u32mf2(vuint32mf2_t vd, vuint32mf2_t vs2, + size_t vl) { + return __riscv_vaeskf2(vd, vs2, 0, vl); +} + +vuint32m1_t test_vaeskf2_vi_u32m1(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vaeskf2(vd, vs2, 0, vl); +} + +vuint32m2_t test_vaeskf2_vi_u32m2(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vaeskf2(vd, vs2, 0, vl); +} + +vuint32m4_t test_vaeskf2_vi_u32m4(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { +
return __riscv_vaeskf2(vd, vs2, 0, vl); +} + +vuint32m8_t test_vaeskf2_vi_u32m8(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { + return __riscv_vaeskf2(vd, vs2, 0, vl); +} diff --git a/auto-generated/vector-crypto/llvm-overloaded-tests/vaesz.c b/auto-generated/vector-crypto/llvm-overloaded-tests/vaesz.c new file mode 100644 index 000000000..b98fe52ba --- /dev/null +++ b/auto-generated/vector-crypto/llvm-overloaded-tests/vaesz.c @@ -0,0 +1,83 @@ +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zvl512b \ +// RUN: -target-feature +zvbb \ +// RUN: -target-feature +zvbc \ +// RUN: -target-feature +zvkg \ +// RUN: -target-feature +zvkned \ +// RUN: -target-feature +zvknhb \ +// RUN: -target-feature +zvksed \ +// RUN: -target-feature +zvksh -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include + +vuint32mf2_t test_vaesz_vs_u32mf2_u32mf2(vuint32mf2_t vd, vuint32mf2_t vs2, + size_t vl) { + return __riscv_vaesz(vd, vs2, vl); +} + +vuint32m1_t test_vaesz_vs_u32mf2_u32m1(vuint32m1_t vd, vuint32mf2_t vs2, + size_t vl) { + return __riscv_vaesz(vd, vs2, vl); +} + +vuint32m2_t test_vaesz_vs_u32mf2_u32m2(vuint32m2_t vd, vuint32mf2_t vs2, + size_t vl) { + return __riscv_vaesz(vd, vs2, vl); +} + +vuint32m4_t test_vaesz_vs_u32mf2_u32m4(vuint32m4_t vd, vuint32mf2_t vs2, + size_t vl) { + return __riscv_vaesz(vd, vs2, vl); +} + +vuint32m8_t test_vaesz_vs_u32mf2_u32m8(vuint32m8_t vd, vuint32mf2_t vs2, + size_t vl) { + return __riscv_vaesz(vd, vs2, vl); +} + +vuint32m1_t test_vaesz_vs_u32m1_u32m1(vuint32m1_t vd, vuint32m1_t vs2, + size_t vl) { + return __riscv_vaesz(vd, vs2, vl); +} + +vuint32m2_t test_vaesz_vs_u32m1_u32m2(vuint32m2_t vd, vuint32m1_t vs2, + size_t vl) { + return __riscv_vaesz(vd, vs2, vl); +} + +vuint32m4_t test_vaesz_vs_u32m1_u32m4(vuint32m4_t vd, vuint32m1_t vs2, + size_t vl) { + return __riscv_vaesz(vd, vs2, vl); +} + +vuint32m8_t test_vaesz_vs_u32m1_u32m8(vuint32m8_t vd, vuint32m1_t vs2, + size_t vl) { + return __riscv_vaesz(vd, vs2, vl); +} + +vuint32m2_t test_vaesz_vs_u32m2_u32m2(vuint32m2_t vd, vuint32m2_t vs2, + size_t vl) { + return __riscv_vaesz(vd, vs2, vl); +} + +vuint32m4_t test_vaesz_vs_u32m2_u32m4(vuint32m4_t vd, vuint32m2_t vs2, + size_t vl) { + return __riscv_vaesz(vd, vs2, vl); +} + +vuint32m8_t test_vaesz_vs_u32m2_u32m8(vuint32m8_t vd, vuint32m2_t vs2, + size_t vl) { + return __riscv_vaesz(vd, vs2, vl); +} + +vuint32m4_t test_vaesz_vs_u32m4_u32m4(vuint32m4_t vd, vuint32m4_t vs2, + size_t vl) { + return __riscv_vaesz(vd, vs2, vl); +} + +vuint32m8_t test_vaesz_vs_u32m4_u32m8(vuint32m8_t vd, vuint32m4_t vs2, + size_t vl) { + return __riscv_vaesz(vd, vs2, vl); +} diff --git a/auto-generated/vector-crypto/llvm-overloaded-tests/vandn.c b/auto-generated/vector-crypto/llvm-overloaded-tests/vandn.c new file mode 100644 index 000000000..302997b03 --- /dev/null +++ b/auto-generated/vector-crypto/llvm-overloaded-tests/vandn.c @@ -0,0 +1,412 @@ +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zvl512b \ +// RUN: -target-feature +zvbb \ +// RUN: -target-feature +zvbc \ +// RUN: -target-feature +zvkg \ +// RUN: -target-feature +zvkned \ +// RUN: -target-feature +zvknhb \ +// RUN: -target-feature +zvksed \ +// RUN: -target-feature +zvksh -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include + +vuint8mf8_t 
test_vandn_vv_u8mf8(vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { + return __riscv_vandn(vs2, vs1, vl); +} + +vuint8mf8_t test_vandn_vx_u8mf8(vuint8mf8_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vandn(vs2, rs1, vl); +} + +vuint8mf4_t test_vandn_vv_u8mf4(vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { + return __riscv_vandn(vs2, vs1, vl); +} + +vuint8mf4_t test_vandn_vx_u8mf4(vuint8mf4_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vandn(vs2, rs1, vl); +} + +vuint8mf2_t test_vandn_vv_u8mf2(vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { + return __riscv_vandn(vs2, vs1, vl); +} + +vuint8mf2_t test_vandn_vx_u8mf2(vuint8mf2_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vandn(vs2, rs1, vl); +} + +vuint8m1_t test_vandn_vv_u8m1(vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vandn(vs2, vs1, vl); +} + +vuint8m1_t test_vandn_vx_u8m1(vuint8m1_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vandn(vs2, rs1, vl); +} + +vuint8m2_t test_vandn_vv_u8m2(vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) { + return __riscv_vandn(vs2, vs1, vl); +} + +vuint8m2_t test_vandn_vx_u8m2(vuint8m2_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vandn(vs2, rs1, vl); +} + +vuint8m4_t test_vandn_vv_u8m4(vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) { + return __riscv_vandn(vs2, vs1, vl); +} + +vuint8m4_t test_vandn_vx_u8m4(vuint8m4_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vandn(vs2, rs1, vl); +} + +vuint8m8_t test_vandn_vv_u8m8(vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) { + return __riscv_vandn(vs2, vs1, vl); +} + +vuint8m8_t test_vandn_vx_u8m8(vuint8m8_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vandn(vs2, rs1, vl); +} + +vuint16mf4_t test_vandn_vv_u16mf4(vuint16mf4_t vs2, vuint16mf4_t vs1, + size_t vl) { + return __riscv_vandn(vs2, vs1, vl); +} + +vuint16mf4_t test_vandn_vx_u16mf4(vuint16mf4_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vandn(vs2, rs1, vl); +} + +vuint16mf2_t test_vandn_vv_u16mf2(vuint16mf2_t vs2, vuint16mf2_t vs1, + size_t vl) { + return __riscv_vandn(vs2, vs1, vl); +} + +vuint16mf2_t test_vandn_vx_u16mf2(vuint16mf2_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vandn(vs2, rs1, vl); +} + +vuint16m1_t test_vandn_vv_u16m1(vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vandn(vs2, vs1, vl); +} + +vuint16m1_t test_vandn_vx_u16m1(vuint16m1_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vandn(vs2, rs1, vl); +} + +vuint16m2_t test_vandn_vv_u16m2(vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { + return __riscv_vandn(vs2, vs1, vl); +} + +vuint16m2_t test_vandn_vx_u16m2(vuint16m2_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vandn(vs2, rs1, vl); +} + +vuint16m4_t test_vandn_vv_u16m4(vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { + return __riscv_vandn(vs2, vs1, vl); +} + +vuint16m4_t test_vandn_vx_u16m4(vuint16m4_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vandn(vs2, rs1, vl); +} + +vuint16m8_t test_vandn_vv_u16m8(vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) { + return __riscv_vandn(vs2, vs1, vl); +} + +vuint16m8_t test_vandn_vx_u16m8(vuint16m8_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vandn(vs2, rs1, vl); +} + +vuint32mf2_t test_vandn_vv_u32mf2(vuint32mf2_t vs2, vuint32mf2_t vs1, + size_t vl) { + return __riscv_vandn(vs2, vs1, vl); +} + +vuint32mf2_t test_vandn_vx_u32mf2(vuint32mf2_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vandn(vs2, rs1, vl); +} + +vuint32m1_t test_vandn_vv_u32m1(vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vandn(vs2, vs1, vl); +} + +vuint32m1_t test_vandn_vx_u32m1(vuint32m1_t vs2, 
uint32_t rs1, size_t vl) { + return __riscv_vandn(vs2, rs1, vl); +} + +vuint32m2_t test_vandn_vv_u32m2(vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { + return __riscv_vandn(vs2, vs1, vl); +} + +vuint32m2_t test_vandn_vx_u32m2(vuint32m2_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vandn(vs2, rs1, vl); +} + +vuint32m4_t test_vandn_vv_u32m4(vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { + return __riscv_vandn(vs2, vs1, vl); +} + +vuint32m4_t test_vandn_vx_u32m4(vuint32m4_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vandn(vs2, rs1, vl); +} + +vuint32m8_t test_vandn_vv_u32m8(vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) { + return __riscv_vandn(vs2, vs1, vl); +} + +vuint32m8_t test_vandn_vx_u32m8(vuint32m8_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vandn(vs2, rs1, vl); +} + +vuint64m1_t test_vandn_vv_u64m1(vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vandn(vs2, vs1, vl); +} + +vuint64m1_t test_vandn_vx_u64m1(vuint64m1_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vandn(vs2, rs1, vl); +} + +vuint64m2_t test_vandn_vv_u64m2(vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { + return __riscv_vandn(vs2, vs1, vl); +} + +vuint64m2_t test_vandn_vx_u64m2(vuint64m2_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vandn(vs2, rs1, vl); +} + +vuint64m4_t test_vandn_vv_u64m4(vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { + return __riscv_vandn(vs2, vs1, vl); +} + +vuint64m4_t test_vandn_vx_u64m4(vuint64m4_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vandn(vs2, rs1, vl); +} + +vuint64m8_t test_vandn_vv_u64m8(vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { + return __riscv_vandn(vs2, vs1, vl); +} + +vuint64m8_t test_vandn_vx_u64m8(vuint64m8_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vandn(vs2, rs1, vl); +} + +vuint8mf8_t test_vandn_vv_u8mf8_m(vbool64_t vm, vuint8mf8_t vs2, + vuint8mf8_t vs1, size_t vl) { + return __riscv_vandn(vm, vs2, vs1, vl); +} + +vuint8mf8_t test_vandn_vx_u8mf8_m(vbool64_t vm, vuint8mf8_t vs2, uint8_t rs1, + size_t vl) { + return __riscv_vandn(vm, vs2, rs1, vl); +} + +vuint8mf4_t test_vandn_vv_u8mf4_m(vbool32_t vm, vuint8mf4_t vs2, + vuint8mf4_t vs1, size_t vl) { + return __riscv_vandn(vm, vs2, vs1, vl); +} + +vuint8mf4_t test_vandn_vx_u8mf4_m(vbool32_t vm, vuint8mf4_t vs2, uint8_t rs1, + size_t vl) { + return __riscv_vandn(vm, vs2, rs1, vl); +} + +vuint8mf2_t test_vandn_vv_u8mf2_m(vbool16_t vm, vuint8mf2_t vs2, + vuint8mf2_t vs1, size_t vl) { + return __riscv_vandn(vm, vs2, vs1, vl); +} + +vuint8mf2_t test_vandn_vx_u8mf2_m(vbool16_t vm, vuint8mf2_t vs2, uint8_t rs1, + size_t vl) { + return __riscv_vandn(vm, vs2, rs1, vl); +} + +vuint8m1_t test_vandn_vv_u8m1_m(vbool8_t vm, vuint8m1_t vs2, vuint8m1_t vs1, + size_t vl) { + return __riscv_vandn(vm, vs2, vs1, vl); +} + +vuint8m1_t test_vandn_vx_u8m1_m(vbool8_t vm, vuint8m1_t vs2, uint8_t rs1, + size_t vl) { + return __riscv_vandn(vm, vs2, rs1, vl); +} + +vuint8m2_t test_vandn_vv_u8m2_m(vbool4_t vm, vuint8m2_t vs2, vuint8m2_t vs1, + size_t vl) { + return __riscv_vandn(vm, vs2, vs1, vl); +} + +vuint8m2_t test_vandn_vx_u8m2_m(vbool4_t vm, vuint8m2_t vs2, uint8_t rs1, + size_t vl) { + return __riscv_vandn(vm, vs2, rs1, vl); +} + +vuint8m4_t test_vandn_vv_u8m4_m(vbool2_t vm, vuint8m4_t vs2, vuint8m4_t vs1, + size_t vl) { + return __riscv_vandn(vm, vs2, vs1, vl); +} + +vuint8m4_t test_vandn_vx_u8m4_m(vbool2_t vm, vuint8m4_t vs2, uint8_t rs1, + size_t vl) { + return __riscv_vandn(vm, vs2, rs1, vl); +} + +vuint8m8_t test_vandn_vv_u8m8_m(vbool1_t vm, vuint8m8_t vs2, vuint8m8_t vs1, + size_t vl) { + 
return __riscv_vandn(vm, vs2, vs1, vl); +} + +vuint8m8_t test_vandn_vx_u8m8_m(vbool1_t vm, vuint8m8_t vs2, uint8_t rs1, + size_t vl) { + return __riscv_vandn(vm, vs2, rs1, vl); +} + +vuint16mf4_t test_vandn_vv_u16mf4_m(vbool64_t vm, vuint16mf4_t vs2, + vuint16mf4_t vs1, size_t vl) { + return __riscv_vandn(vm, vs2, vs1, vl); +} + +vuint16mf4_t test_vandn_vx_u16mf4_m(vbool64_t vm, vuint16mf4_t vs2, + uint16_t rs1, size_t vl) { + return __riscv_vandn(vm, vs2, rs1, vl); +} + +vuint16mf2_t test_vandn_vv_u16mf2_m(vbool32_t vm, vuint16mf2_t vs2, + vuint16mf2_t vs1, size_t vl) { + return __riscv_vandn(vm, vs2, vs1, vl); +} + +vuint16mf2_t test_vandn_vx_u16mf2_m(vbool32_t vm, vuint16mf2_t vs2, + uint16_t rs1, size_t vl) { + return __riscv_vandn(vm, vs2, rs1, vl); +} + +vuint16m1_t test_vandn_vv_u16m1_m(vbool16_t vm, vuint16m1_t vs2, + vuint16m1_t vs1, size_t vl) { + return __riscv_vandn(vm, vs2, vs1, vl); +} + +vuint16m1_t test_vandn_vx_u16m1_m(vbool16_t vm, vuint16m1_t vs2, uint16_t rs1, + size_t vl) { + return __riscv_vandn(vm, vs2, rs1, vl); +} + +vuint16m2_t test_vandn_vv_u16m2_m(vbool8_t vm, vuint16m2_t vs2, vuint16m2_t vs1, + size_t vl) { + return __riscv_vandn(vm, vs2, vs1, vl); +} + +vuint16m2_t test_vandn_vx_u16m2_m(vbool8_t vm, vuint16m2_t vs2, uint16_t rs1, + size_t vl) { + return __riscv_vandn(vm, vs2, rs1, vl); +} + +vuint16m4_t test_vandn_vv_u16m4_m(vbool4_t vm, vuint16m4_t vs2, vuint16m4_t vs1, + size_t vl) { + return __riscv_vandn(vm, vs2, vs1, vl); +} + +vuint16m4_t test_vandn_vx_u16m4_m(vbool4_t vm, vuint16m4_t vs2, uint16_t rs1, + size_t vl) { + return __riscv_vandn(vm, vs2, rs1, vl); +} + +vuint16m8_t test_vandn_vv_u16m8_m(vbool2_t vm, vuint16m8_t vs2, vuint16m8_t vs1, + size_t vl) { + return __riscv_vandn(vm, vs2, vs1, vl); +} + +vuint16m8_t test_vandn_vx_u16m8_m(vbool2_t vm, vuint16m8_t vs2, uint16_t rs1, + size_t vl) { + return __riscv_vandn(vm, vs2, rs1, vl); +} + +vuint32mf2_t test_vandn_vv_u32mf2_m(vbool64_t vm, vuint32mf2_t vs2, + vuint32mf2_t vs1, size_t vl) { + return __riscv_vandn(vm, vs2, vs1, vl); +} + +vuint32mf2_t test_vandn_vx_u32mf2_m(vbool64_t vm, vuint32mf2_t vs2, + uint32_t rs1, size_t vl) { + return __riscv_vandn(vm, vs2, rs1, vl); +} + +vuint32m1_t test_vandn_vv_u32m1_m(vbool32_t vm, vuint32m1_t vs2, + vuint32m1_t vs1, size_t vl) { + return __riscv_vandn(vm, vs2, vs1, vl); +} + +vuint32m1_t test_vandn_vx_u32m1_m(vbool32_t vm, vuint32m1_t vs2, uint32_t rs1, + size_t vl) { + return __riscv_vandn(vm, vs2, rs1, vl); +} + +vuint32m2_t test_vandn_vv_u32m2_m(vbool16_t vm, vuint32m2_t vs2, + vuint32m2_t vs1, size_t vl) { + return __riscv_vandn(vm, vs2, vs1, vl); +} + +vuint32m2_t test_vandn_vx_u32m2_m(vbool16_t vm, vuint32m2_t vs2, uint32_t rs1, + size_t vl) { + return __riscv_vandn(vm, vs2, rs1, vl); +} + +vuint32m4_t test_vandn_vv_u32m4_m(vbool8_t vm, vuint32m4_t vs2, vuint32m4_t vs1, + size_t vl) { + return __riscv_vandn(vm, vs2, vs1, vl); +} + +vuint32m4_t test_vandn_vx_u32m4_m(vbool8_t vm, vuint32m4_t vs2, uint32_t rs1, + size_t vl) { + return __riscv_vandn(vm, vs2, rs1, vl); +} + +vuint32m8_t test_vandn_vv_u32m8_m(vbool4_t vm, vuint32m8_t vs2, vuint32m8_t vs1, + size_t vl) { + return __riscv_vandn(vm, vs2, vs1, vl); +} + +vuint32m8_t test_vandn_vx_u32m8_m(vbool4_t vm, vuint32m8_t vs2, uint32_t rs1, + size_t vl) { + return __riscv_vandn(vm, vs2, rs1, vl); +} + +vuint64m1_t test_vandn_vv_u64m1_m(vbool64_t vm, vuint64m1_t vs2, + vuint64m1_t vs1, size_t vl) { + return __riscv_vandn(vm, vs2, vs1, vl); +} + +vuint64m1_t test_vandn_vx_u64m1_m(vbool64_t vm, 
vuint64m1_t vs2, uint64_t rs1, + size_t vl) { + return __riscv_vandn(vm, vs2, rs1, vl); +} + +vuint64m2_t test_vandn_vv_u64m2_m(vbool32_t vm, vuint64m2_t vs2, + vuint64m2_t vs1, size_t vl) { + return __riscv_vandn(vm, vs2, vs1, vl); +} + +vuint64m2_t test_vandn_vx_u64m2_m(vbool32_t vm, vuint64m2_t vs2, uint64_t rs1, + size_t vl) { + return __riscv_vandn(vm, vs2, rs1, vl); +} + +vuint64m4_t test_vandn_vv_u64m4_m(vbool16_t vm, vuint64m4_t vs2, + vuint64m4_t vs1, size_t vl) { + return __riscv_vandn(vm, vs2, vs1, vl); +} + +vuint64m4_t test_vandn_vx_u64m4_m(vbool16_t vm, vuint64m4_t vs2, uint64_t rs1, + size_t vl) { + return __riscv_vandn(vm, vs2, rs1, vl); +} + +vuint64m8_t test_vandn_vv_u64m8_m(vbool8_t vm, vuint64m8_t vs2, vuint64m8_t vs1, + size_t vl) { + return __riscv_vandn(vm, vs2, vs1, vl); +} + +vuint64m8_t test_vandn_vx_u64m8_m(vbool8_t vm, vuint64m8_t vs2, uint64_t rs1, + size_t vl) { + return __riscv_vandn(vm, vs2, rs1, vl); +} diff --git a/auto-generated/vector-crypto/llvm-overloaded-tests/vbrev.c b/auto-generated/vector-crypto/llvm-overloaded-tests/vbrev.c new file mode 100644 index 000000000..9654a13b5 --- /dev/null +++ b/auto-generated/vector-crypto/llvm-overloaded-tests/vbrev.c @@ -0,0 +1,189 @@ +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zvl512b \ +// RUN: -target-feature +zvbb \ +// RUN: -target-feature +zvbc \ +// RUN: -target-feature +zvkg \ +// RUN: -target-feature +zvkned \ +// RUN: -target-feature +zvknhb \ +// RUN: -target-feature +zvksed \ +// RUN: -target-feature +zvksh -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include + +vuint8mf8_t test_vbrev_v_u8mf8(vuint8mf8_t vs2, size_t vl) { + return __riscv_vbrev(vs2, vl); +} + +vuint8mf4_t test_vbrev_v_u8mf4(vuint8mf4_t vs2, size_t vl) { + return __riscv_vbrev(vs2, vl); +} + +vuint8mf2_t test_vbrev_v_u8mf2(vuint8mf2_t vs2, size_t vl) { + return __riscv_vbrev(vs2, vl); +} + +vuint8m1_t test_vbrev_v_u8m1(vuint8m1_t vs2, size_t vl) { + return __riscv_vbrev(vs2, vl); +} + +vuint8m2_t test_vbrev_v_u8m2(vuint8m2_t vs2, size_t vl) { + return __riscv_vbrev(vs2, vl); +} + +vuint8m4_t test_vbrev_v_u8m4(vuint8m4_t vs2, size_t vl) { + return __riscv_vbrev(vs2, vl); +} + +vuint8m8_t test_vbrev_v_u8m8(vuint8m8_t vs2, size_t vl) { + return __riscv_vbrev(vs2, vl); +} + +vuint16mf4_t test_vbrev_v_u16mf4(vuint16mf4_t vs2, size_t vl) { + return __riscv_vbrev(vs2, vl); +} + +vuint16mf2_t test_vbrev_v_u16mf2(vuint16mf2_t vs2, size_t vl) { + return __riscv_vbrev(vs2, vl); +} + +vuint16m1_t test_vbrev_v_u16m1(vuint16m1_t vs2, size_t vl) { + return __riscv_vbrev(vs2, vl); +} + +vuint16m2_t test_vbrev_v_u16m2(vuint16m2_t vs2, size_t vl) { + return __riscv_vbrev(vs2, vl); +} + +vuint16m4_t test_vbrev_v_u16m4(vuint16m4_t vs2, size_t vl) { + return __riscv_vbrev(vs2, vl); +} + +vuint16m8_t test_vbrev_v_u16m8(vuint16m8_t vs2, size_t vl) { + return __riscv_vbrev(vs2, vl); +} + +vuint32mf2_t test_vbrev_v_u32mf2(vuint32mf2_t vs2, size_t vl) { + return __riscv_vbrev(vs2, vl); +} + +vuint32m1_t test_vbrev_v_u32m1(vuint32m1_t vs2, size_t vl) { + return __riscv_vbrev(vs2, vl); +} + +vuint32m2_t test_vbrev_v_u32m2(vuint32m2_t vs2, size_t vl) { + return __riscv_vbrev(vs2, vl); +} + +vuint32m4_t test_vbrev_v_u32m4(vuint32m4_t vs2, size_t vl) { + return __riscv_vbrev(vs2, vl); +} + +vuint32m8_t test_vbrev_v_u32m8(vuint32m8_t vs2, size_t vl) { + return __riscv_vbrev(vs2, vl); +} + +vuint64m1_t 
test_vbrev_v_u64m1(vuint64m1_t vs2, size_t vl) { + return __riscv_vbrev(vs2, vl); +} + +vuint64m2_t test_vbrev_v_u64m2(vuint64m2_t vs2, size_t vl) { + return __riscv_vbrev(vs2, vl); +} + +vuint64m4_t test_vbrev_v_u64m4(vuint64m4_t vs2, size_t vl) { + return __riscv_vbrev(vs2, vl); +} + +vuint64m8_t test_vbrev_v_u64m8(vuint64m8_t vs2, size_t vl) { + return __riscv_vbrev(vs2, vl); +} + +vuint8mf8_t test_vbrev_v_u8mf8_m(vbool64_t vm, vuint8mf8_t vs2, size_t vl) { + return __riscv_vbrev(vm, vs2, vl); +} + +vuint8mf4_t test_vbrev_v_u8mf4_m(vbool32_t vm, vuint8mf4_t vs2, size_t vl) { + return __riscv_vbrev(vm, vs2, vl); +} + +vuint8mf2_t test_vbrev_v_u8mf2_m(vbool16_t vm, vuint8mf2_t vs2, size_t vl) { + return __riscv_vbrev(vm, vs2, vl); +} + +vuint8m1_t test_vbrev_v_u8m1_m(vbool8_t vm, vuint8m1_t vs2, size_t vl) { + return __riscv_vbrev(vm, vs2, vl); +} + +vuint8m2_t test_vbrev_v_u8m2_m(vbool4_t vm, vuint8m2_t vs2, size_t vl) { + return __riscv_vbrev(vm, vs2, vl); +} + +vuint8m4_t test_vbrev_v_u8m4_m(vbool2_t vm, vuint8m4_t vs2, size_t vl) { + return __riscv_vbrev(vm, vs2, vl); +} + +vuint8m8_t test_vbrev_v_u8m8_m(vbool1_t vm, vuint8m8_t vs2, size_t vl) { + return __riscv_vbrev(vm, vs2, vl); +} + +vuint16mf4_t test_vbrev_v_u16mf4_m(vbool64_t vm, vuint16mf4_t vs2, size_t vl) { + return __riscv_vbrev(vm, vs2, vl); +} + +vuint16mf2_t test_vbrev_v_u16mf2_m(vbool32_t vm, vuint16mf2_t vs2, size_t vl) { + return __riscv_vbrev(vm, vs2, vl); +} + +vuint16m1_t test_vbrev_v_u16m1_m(vbool16_t vm, vuint16m1_t vs2, size_t vl) { + return __riscv_vbrev(vm, vs2, vl); +} + +vuint16m2_t test_vbrev_v_u16m2_m(vbool8_t vm, vuint16m2_t vs2, size_t vl) { + return __riscv_vbrev(vm, vs2, vl); +} + +vuint16m4_t test_vbrev_v_u16m4_m(vbool4_t vm, vuint16m4_t vs2, size_t vl) { + return __riscv_vbrev(vm, vs2, vl); +} + +vuint16m8_t test_vbrev_v_u16m8_m(vbool2_t vm, vuint16m8_t vs2, size_t vl) { + return __riscv_vbrev(vm, vs2, vl); +} + +vuint32mf2_t test_vbrev_v_u32mf2_m(vbool64_t vm, vuint32mf2_t vs2, size_t vl) { + return __riscv_vbrev(vm, vs2, vl); +} + +vuint32m1_t test_vbrev_v_u32m1_m(vbool32_t vm, vuint32m1_t vs2, size_t vl) { + return __riscv_vbrev(vm, vs2, vl); +} + +vuint32m2_t test_vbrev_v_u32m2_m(vbool16_t vm, vuint32m2_t vs2, size_t vl) { + return __riscv_vbrev(vm, vs2, vl); +} + +vuint32m4_t test_vbrev_v_u32m4_m(vbool8_t vm, vuint32m4_t vs2, size_t vl) { + return __riscv_vbrev(vm, vs2, vl); +} + +vuint32m8_t test_vbrev_v_u32m8_m(vbool4_t vm, vuint32m8_t vs2, size_t vl) { + return __riscv_vbrev(vm, vs2, vl); +} + +vuint64m1_t test_vbrev_v_u64m1_m(vbool64_t vm, vuint64m1_t vs2, size_t vl) { + return __riscv_vbrev(vm, vs2, vl); +} + +vuint64m2_t test_vbrev_v_u64m2_m(vbool32_t vm, vuint64m2_t vs2, size_t vl) { + return __riscv_vbrev(vm, vs2, vl); +} + +vuint64m4_t test_vbrev_v_u64m4_m(vbool16_t vm, vuint64m4_t vs2, size_t vl) { + return __riscv_vbrev(vm, vs2, vl); +} + +vuint64m8_t test_vbrev_v_u64m8_m(vbool8_t vm, vuint64m8_t vs2, size_t vl) { + return __riscv_vbrev(vm, vs2, vl); +} diff --git a/auto-generated/vector-crypto/llvm-overloaded-tests/vbrev8.c b/auto-generated/vector-crypto/llvm-overloaded-tests/vbrev8.c new file mode 100644 index 000000000..68503540f --- /dev/null +++ b/auto-generated/vector-crypto/llvm-overloaded-tests/vbrev8.c @@ -0,0 +1,189 @@ +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zvl512b \ +// RUN: -target-feature +zvbb \ +// RUN: -target-feature +zvbc \ +// RUN: -target-feature +zvkg \ +// RUN: -target-feature +zvkned \ +// 
RUN: -target-feature +zvknhb \ +// RUN: -target-feature +zvksed \ +// RUN: -target-feature +zvksh -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include + +vuint8mf8_t test_vbrev8_v_u8mf8(vuint8mf8_t vs2, size_t vl) { + return __riscv_vbrev8(vs2, vl); +} + +vuint8mf4_t test_vbrev8_v_u8mf4(vuint8mf4_t vs2, size_t vl) { + return __riscv_vbrev8(vs2, vl); +} + +vuint8mf2_t test_vbrev8_v_u8mf2(vuint8mf2_t vs2, size_t vl) { + return __riscv_vbrev8(vs2, vl); +} + +vuint8m1_t test_vbrev8_v_u8m1(vuint8m1_t vs2, size_t vl) { + return __riscv_vbrev8(vs2, vl); +} + +vuint8m2_t test_vbrev8_v_u8m2(vuint8m2_t vs2, size_t vl) { + return __riscv_vbrev8(vs2, vl); +} + +vuint8m4_t test_vbrev8_v_u8m4(vuint8m4_t vs2, size_t vl) { + return __riscv_vbrev8(vs2, vl); +} + +vuint8m8_t test_vbrev8_v_u8m8(vuint8m8_t vs2, size_t vl) { + return __riscv_vbrev8(vs2, vl); +} + +vuint16mf4_t test_vbrev8_v_u16mf4(vuint16mf4_t vs2, size_t vl) { + return __riscv_vbrev8(vs2, vl); +} + +vuint16mf2_t test_vbrev8_v_u16mf2(vuint16mf2_t vs2, size_t vl) { + return __riscv_vbrev8(vs2, vl); +} + +vuint16m1_t test_vbrev8_v_u16m1(vuint16m1_t vs2, size_t vl) { + return __riscv_vbrev8(vs2, vl); +} + +vuint16m2_t test_vbrev8_v_u16m2(vuint16m2_t vs2, size_t vl) { + return __riscv_vbrev8(vs2, vl); +} + +vuint16m4_t test_vbrev8_v_u16m4(vuint16m4_t vs2, size_t vl) { + return __riscv_vbrev8(vs2, vl); +} + +vuint16m8_t test_vbrev8_v_u16m8(vuint16m8_t vs2, size_t vl) { + return __riscv_vbrev8(vs2, vl); +} + +vuint32mf2_t test_vbrev8_v_u32mf2(vuint32mf2_t vs2, size_t vl) { + return __riscv_vbrev8(vs2, vl); +} + +vuint32m1_t test_vbrev8_v_u32m1(vuint32m1_t vs2, size_t vl) { + return __riscv_vbrev8(vs2, vl); +} + +vuint32m2_t test_vbrev8_v_u32m2(vuint32m2_t vs2, size_t vl) { + return __riscv_vbrev8(vs2, vl); +} + +vuint32m4_t test_vbrev8_v_u32m4(vuint32m4_t vs2, size_t vl) { + return __riscv_vbrev8(vs2, vl); +} + +vuint32m8_t test_vbrev8_v_u32m8(vuint32m8_t vs2, size_t vl) { + return __riscv_vbrev8(vs2, vl); +} + +vuint64m1_t test_vbrev8_v_u64m1(vuint64m1_t vs2, size_t vl) { + return __riscv_vbrev8(vs2, vl); +} + +vuint64m2_t test_vbrev8_v_u64m2(vuint64m2_t vs2, size_t vl) { + return __riscv_vbrev8(vs2, vl); +} + +vuint64m4_t test_vbrev8_v_u64m4(vuint64m4_t vs2, size_t vl) { + return __riscv_vbrev8(vs2, vl); +} + +vuint64m8_t test_vbrev8_v_u64m8(vuint64m8_t vs2, size_t vl) { + return __riscv_vbrev8(vs2, vl); +} + +vuint8mf8_t test_vbrev8_v_u8mf8_m(vbool64_t vm, vuint8mf8_t vs2, size_t vl) { + return __riscv_vbrev8(vm, vs2, vl); +} + +vuint8mf4_t test_vbrev8_v_u8mf4_m(vbool32_t vm, vuint8mf4_t vs2, size_t vl) { + return __riscv_vbrev8(vm, vs2, vl); +} + +vuint8mf2_t test_vbrev8_v_u8mf2_m(vbool16_t vm, vuint8mf2_t vs2, size_t vl) { + return __riscv_vbrev8(vm, vs2, vl); +} + +vuint8m1_t test_vbrev8_v_u8m1_m(vbool8_t vm, vuint8m1_t vs2, size_t vl) { + return __riscv_vbrev8(vm, vs2, vl); +} + +vuint8m2_t test_vbrev8_v_u8m2_m(vbool4_t vm, vuint8m2_t vs2, size_t vl) { + return __riscv_vbrev8(vm, vs2, vl); +} + +vuint8m4_t test_vbrev8_v_u8m4_m(vbool2_t vm, vuint8m4_t vs2, size_t vl) { + return __riscv_vbrev8(vm, vs2, vl); +} + +vuint8m8_t test_vbrev8_v_u8m8_m(vbool1_t vm, vuint8m8_t vs2, size_t vl) { + return __riscv_vbrev8(vm, vs2, vl); +} + +vuint16mf4_t test_vbrev8_v_u16mf4_m(vbool64_t vm, vuint16mf4_t vs2, size_t vl) { + return __riscv_vbrev8(vm, vs2, vl); +} + +vuint16mf2_t test_vbrev8_v_u16mf2_m(vbool32_t vm, vuint16mf2_t vs2, size_t vl) { + return __riscv_vbrev8(vm, 
vs2, vl); +} + +vuint16m1_t test_vbrev8_v_u16m1_m(vbool16_t vm, vuint16m1_t vs2, size_t vl) { + return __riscv_vbrev8(vm, vs2, vl); +} + +vuint16m2_t test_vbrev8_v_u16m2_m(vbool8_t vm, vuint16m2_t vs2, size_t vl) { + return __riscv_vbrev8(vm, vs2, vl); +} + +vuint16m4_t test_vbrev8_v_u16m4_m(vbool4_t vm, vuint16m4_t vs2, size_t vl) { + return __riscv_vbrev8(vm, vs2, vl); +} + +vuint16m8_t test_vbrev8_v_u16m8_m(vbool2_t vm, vuint16m8_t vs2, size_t vl) { + return __riscv_vbrev8(vm, vs2, vl); +} + +vuint32mf2_t test_vbrev8_v_u32mf2_m(vbool64_t vm, vuint32mf2_t vs2, size_t vl) { + return __riscv_vbrev8(vm, vs2, vl); +} + +vuint32m1_t test_vbrev8_v_u32m1_m(vbool32_t vm, vuint32m1_t vs2, size_t vl) { + return __riscv_vbrev8(vm, vs2, vl); +} + +vuint32m2_t test_vbrev8_v_u32m2_m(vbool16_t vm, vuint32m2_t vs2, size_t vl) { + return __riscv_vbrev8(vm, vs2, vl); +} + +vuint32m4_t test_vbrev8_v_u32m4_m(vbool8_t vm, vuint32m4_t vs2, size_t vl) { + return __riscv_vbrev8(vm, vs2, vl); +} + +vuint32m8_t test_vbrev8_v_u32m8_m(vbool4_t vm, vuint32m8_t vs2, size_t vl) { + return __riscv_vbrev8(vm, vs2, vl); +} + +vuint64m1_t test_vbrev8_v_u64m1_m(vbool64_t vm, vuint64m1_t vs2, size_t vl) { + return __riscv_vbrev8(vm, vs2, vl); +} + +vuint64m2_t test_vbrev8_v_u64m2_m(vbool32_t vm, vuint64m2_t vs2, size_t vl) { + return __riscv_vbrev8(vm, vs2, vl); +} + +vuint64m4_t test_vbrev8_v_u64m4_m(vbool16_t vm, vuint64m4_t vs2, size_t vl) { + return __riscv_vbrev8(vm, vs2, vl); +} + +vuint64m8_t test_vbrev8_v_u64m8_m(vbool8_t vm, vuint64m8_t vs2, size_t vl) { + return __riscv_vbrev8(vm, vs2, vl); +} diff --git a/auto-generated/vector-crypto/llvm-overloaded-tests/vclmul.c b/auto-generated/vector-crypto/llvm-overloaded-tests/vclmul.c new file mode 100644 index 000000000..994a54025 --- /dev/null +++ b/auto-generated/vector-crypto/llvm-overloaded-tests/vclmul.c @@ -0,0 +1,85 @@ +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zvl512b \ +// RUN: -target-feature +zvbb \ +// RUN: -target-feature +zvbc \ +// RUN: -target-feature +zvkg \ +// RUN: -target-feature +zvkned \ +// RUN: -target-feature +zvknhb \ +// RUN: -target-feature +zvksed \ +// RUN: -target-feature +zvksh -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include + +vuint64m1_t test_vclmul_vv_u64m1(vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vclmul(vs2, vs1, vl); +} + +vuint64m1_t test_vclmul_vx_u64m1(vuint64m1_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vclmul(vs2, rs1, vl); +} + +vuint64m2_t test_vclmul_vv_u64m2(vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { + return __riscv_vclmul(vs2, vs1, vl); +} + +vuint64m2_t test_vclmul_vx_u64m2(vuint64m2_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vclmul(vs2, rs1, vl); +} + +vuint64m4_t test_vclmul_vv_u64m4(vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { + return __riscv_vclmul(vs2, vs1, vl); +} + +vuint64m4_t test_vclmul_vx_u64m4(vuint64m4_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vclmul(vs2, rs1, vl); +} + +vuint64m8_t test_vclmul_vv_u64m8(vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { + return __riscv_vclmul(vs2, vs1, vl); +} + +vuint64m8_t test_vclmul_vx_u64m8(vuint64m8_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vclmul(vs2, rs1, vl); +} + +vuint64m1_t test_vclmul_vv_u64m1_m(vbool64_t vm, vuint64m1_t vs2, + vuint64m1_t vs1, size_t vl) { + return __riscv_vclmul(vm, vs2, vs1, vl); +} + +vuint64m1_t test_vclmul_vx_u64m1_m(vbool64_t 
vm, vuint64m1_t vs2, uint64_t rs1, + size_t vl) { + return __riscv_vclmul(vm, vs2, rs1, vl); +} + +vuint64m2_t test_vclmul_vv_u64m2_m(vbool32_t vm, vuint64m2_t vs2, + vuint64m2_t vs1, size_t vl) { + return __riscv_vclmul(vm, vs2, vs1, vl); +} + +vuint64m2_t test_vclmul_vx_u64m2_m(vbool32_t vm, vuint64m2_t vs2, uint64_t rs1, + size_t vl) { + return __riscv_vclmul(vm, vs2, rs1, vl); +} + +vuint64m4_t test_vclmul_vv_u64m4_m(vbool16_t vm, vuint64m4_t vs2, + vuint64m4_t vs1, size_t vl) { + return __riscv_vclmul(vm, vs2, vs1, vl); +} + +vuint64m4_t test_vclmul_vx_u64m4_m(vbool16_t vm, vuint64m4_t vs2, uint64_t rs1, + size_t vl) { + return __riscv_vclmul(vm, vs2, rs1, vl); +} + +vuint64m8_t test_vclmul_vv_u64m8_m(vbool8_t vm, vuint64m8_t vs2, + vuint64m8_t vs1, size_t vl) { + return __riscv_vclmul(vm, vs2, vs1, vl); +} + +vuint64m8_t test_vclmul_vx_u64m8_m(vbool8_t vm, vuint64m8_t vs2, uint64_t rs1, + size_t vl) { + return __riscv_vclmul(vm, vs2, rs1, vl); +} diff --git a/auto-generated/vector-crypto/llvm-overloaded-tests/vclmulh.c b/auto-generated/vector-crypto/llvm-overloaded-tests/vclmulh.c new file mode 100644 index 000000000..fbfa406f6 --- /dev/null +++ b/auto-generated/vector-crypto/llvm-overloaded-tests/vclmulh.c @@ -0,0 +1,85 @@ +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zvl512b \ +// RUN: -target-feature +zvbb \ +// RUN: -target-feature +zvbc \ +// RUN: -target-feature +zvkg \ +// RUN: -target-feature +zvkned \ +// RUN: -target-feature +zvknhb \ +// RUN: -target-feature +zvksed \ +// RUN: -target-feature +zvksh -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include + +vuint64m1_t test_vclmulh_vv_u64m1(vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vclmulh(vs2, vs1, vl); +} + +vuint64m1_t test_vclmulh_vx_u64m1(vuint64m1_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vclmulh(vs2, rs1, vl); +} + +vuint64m2_t test_vclmulh_vv_u64m2(vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { + return __riscv_vclmulh(vs2, vs1, vl); +} + +vuint64m2_t test_vclmulh_vx_u64m2(vuint64m2_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vclmulh(vs2, rs1, vl); +} + +vuint64m4_t test_vclmulh_vv_u64m4(vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { + return __riscv_vclmulh(vs2, vs1, vl); +} + +vuint64m4_t test_vclmulh_vx_u64m4(vuint64m4_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vclmulh(vs2, rs1, vl); +} + +vuint64m8_t test_vclmulh_vv_u64m8(vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { + return __riscv_vclmulh(vs2, vs1, vl); +} + +vuint64m8_t test_vclmulh_vx_u64m8(vuint64m8_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vclmulh(vs2, rs1, vl); +} + +vuint64m1_t test_vclmulh_vv_u64m1_m(vbool64_t vm, vuint64m1_t vs2, + vuint64m1_t vs1, size_t vl) { + return __riscv_vclmulh(vm, vs2, vs1, vl); +} + +vuint64m1_t test_vclmulh_vx_u64m1_m(vbool64_t vm, vuint64m1_t vs2, uint64_t rs1, + size_t vl) { + return __riscv_vclmulh(vm, vs2, rs1, vl); +} + +vuint64m2_t test_vclmulh_vv_u64m2_m(vbool32_t vm, vuint64m2_t vs2, + vuint64m2_t vs1, size_t vl) { + return __riscv_vclmulh(vm, vs2, vs1, vl); +} + +vuint64m2_t test_vclmulh_vx_u64m2_m(vbool32_t vm, vuint64m2_t vs2, uint64_t rs1, + size_t vl) { + return __riscv_vclmulh(vm, vs2, rs1, vl); +} + +vuint64m4_t test_vclmulh_vv_u64m4_m(vbool16_t vm, vuint64m4_t vs2, + vuint64m4_t vs1, size_t vl) { + return __riscv_vclmulh(vm, vs2, vs1, vl); +} + +vuint64m4_t test_vclmulh_vx_u64m4_m(vbool16_t vm, vuint64m4_t 
vs2, uint64_t rs1, + size_t vl) { + return __riscv_vclmulh(vm, vs2, rs1, vl); +} + +vuint64m8_t test_vclmulh_vv_u64m8_m(vbool8_t vm, vuint64m8_t vs2, + vuint64m8_t vs1, size_t vl) { + return __riscv_vclmulh(vm, vs2, vs1, vl); +} + +vuint64m8_t test_vclmulh_vx_u64m8_m(vbool8_t vm, vuint64m8_t vs2, uint64_t rs1, + size_t vl) { + return __riscv_vclmulh(vm, vs2, rs1, vl); +} diff --git a/auto-generated/vector-crypto/llvm-overloaded-tests/vclz.c b/auto-generated/vector-crypto/llvm-overloaded-tests/vclz.c new file mode 100644 index 000000000..c6a727dfc --- /dev/null +++ b/auto-generated/vector-crypto/llvm-overloaded-tests/vclz.c @@ -0,0 +1,189 @@ +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zvl512b \ +// RUN: -target-feature +zvbb \ +// RUN: -target-feature +zvbc \ +// RUN: -target-feature +zvkg \ +// RUN: -target-feature +zvkned \ +// RUN: -target-feature +zvknhb \ +// RUN: -target-feature +zvksed \ +// RUN: -target-feature +zvksh -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include + +vuint8mf8_t test_vclz_v_u8mf8(vuint8mf8_t vs2, size_t vl) { + return __riscv_vclz(vs2, vl); +} + +vuint8mf4_t test_vclz_v_u8mf4(vuint8mf4_t vs2, size_t vl) { + return __riscv_vclz(vs2, vl); +} + +vuint8mf2_t test_vclz_v_u8mf2(vuint8mf2_t vs2, size_t vl) { + return __riscv_vclz(vs2, vl); +} + +vuint8m1_t test_vclz_v_u8m1(vuint8m1_t vs2, size_t vl) { + return __riscv_vclz(vs2, vl); +} + +vuint8m2_t test_vclz_v_u8m2(vuint8m2_t vs2, size_t vl) { + return __riscv_vclz(vs2, vl); +} + +vuint8m4_t test_vclz_v_u8m4(vuint8m4_t vs2, size_t vl) { + return __riscv_vclz(vs2, vl); +} + +vuint8m8_t test_vclz_v_u8m8(vuint8m8_t vs2, size_t vl) { + return __riscv_vclz(vs2, vl); +} + +vuint16mf4_t test_vclz_v_u16mf4(vuint16mf4_t vs2, size_t vl) { + return __riscv_vclz(vs2, vl); +} + +vuint16mf2_t test_vclz_v_u16mf2(vuint16mf2_t vs2, size_t vl) { + return __riscv_vclz(vs2, vl); +} + +vuint16m1_t test_vclz_v_u16m1(vuint16m1_t vs2, size_t vl) { + return __riscv_vclz(vs2, vl); +} + +vuint16m2_t test_vclz_v_u16m2(vuint16m2_t vs2, size_t vl) { + return __riscv_vclz(vs2, vl); +} + +vuint16m4_t test_vclz_v_u16m4(vuint16m4_t vs2, size_t vl) { + return __riscv_vclz(vs2, vl); +} + +vuint16m8_t test_vclz_v_u16m8(vuint16m8_t vs2, size_t vl) { + return __riscv_vclz(vs2, vl); +} + +vuint32mf2_t test_vclz_v_u32mf2(vuint32mf2_t vs2, size_t vl) { + return __riscv_vclz(vs2, vl); +} + +vuint32m1_t test_vclz_v_u32m1(vuint32m1_t vs2, size_t vl) { + return __riscv_vclz(vs2, vl); +} + +vuint32m2_t test_vclz_v_u32m2(vuint32m2_t vs2, size_t vl) { + return __riscv_vclz(vs2, vl); +} + +vuint32m4_t test_vclz_v_u32m4(vuint32m4_t vs2, size_t vl) { + return __riscv_vclz(vs2, vl); +} + +vuint32m8_t test_vclz_v_u32m8(vuint32m8_t vs2, size_t vl) { + return __riscv_vclz(vs2, vl); +} + +vuint64m1_t test_vclz_v_u64m1(vuint64m1_t vs2, size_t vl) { + return __riscv_vclz(vs2, vl); +} + +vuint64m2_t test_vclz_v_u64m2(vuint64m2_t vs2, size_t vl) { + return __riscv_vclz(vs2, vl); +} + +vuint64m4_t test_vclz_v_u64m4(vuint64m4_t vs2, size_t vl) { + return __riscv_vclz(vs2, vl); +} + +vuint64m8_t test_vclz_v_u64m8(vuint64m8_t vs2, size_t vl) { + return __riscv_vclz(vs2, vl); +} + +vuint8mf8_t test_vclz_v_u8mf8_m(vbool64_t vm, vuint8mf8_t vs2, size_t vl) { + return __riscv_vclz(vm, vs2, vl); +} + +vuint8mf4_t test_vclz_v_u8mf4_m(vbool32_t vm, vuint8mf4_t vs2, size_t vl) { + return __riscv_vclz(vm, vs2, vl); +} + +vuint8mf2_t 
test_vclz_v_u8mf2_m(vbool16_t vm, vuint8mf2_t vs2, size_t vl) { + return __riscv_vclz(vm, vs2, vl); +} + +vuint8m1_t test_vclz_v_u8m1_m(vbool8_t vm, vuint8m1_t vs2, size_t vl) { + return __riscv_vclz(vm, vs2, vl); +} + +vuint8m2_t test_vclz_v_u8m2_m(vbool4_t vm, vuint8m2_t vs2, size_t vl) { + return __riscv_vclz(vm, vs2, vl); +} + +vuint8m4_t test_vclz_v_u8m4_m(vbool2_t vm, vuint8m4_t vs2, size_t vl) { + return __riscv_vclz(vm, vs2, vl); +} + +vuint8m8_t test_vclz_v_u8m8_m(vbool1_t vm, vuint8m8_t vs2, size_t vl) { + return __riscv_vclz(vm, vs2, vl); +} + +vuint16mf4_t test_vclz_v_u16mf4_m(vbool64_t vm, vuint16mf4_t vs2, size_t vl) { + return __riscv_vclz(vm, vs2, vl); +} + +vuint16mf2_t test_vclz_v_u16mf2_m(vbool32_t vm, vuint16mf2_t vs2, size_t vl) { + return __riscv_vclz(vm, vs2, vl); +} + +vuint16m1_t test_vclz_v_u16m1_m(vbool16_t vm, vuint16m1_t vs2, size_t vl) { + return __riscv_vclz(vm, vs2, vl); +} + +vuint16m2_t test_vclz_v_u16m2_m(vbool8_t vm, vuint16m2_t vs2, size_t vl) { + return __riscv_vclz(vm, vs2, vl); +} + +vuint16m4_t test_vclz_v_u16m4_m(vbool4_t vm, vuint16m4_t vs2, size_t vl) { + return __riscv_vclz(vm, vs2, vl); +} + +vuint16m8_t test_vclz_v_u16m8_m(vbool2_t vm, vuint16m8_t vs2, size_t vl) { + return __riscv_vclz(vm, vs2, vl); +} + +vuint32mf2_t test_vclz_v_u32mf2_m(vbool64_t vm, vuint32mf2_t vs2, size_t vl) { + return __riscv_vclz(vm, vs2, vl); +} + +vuint32m1_t test_vclz_v_u32m1_m(vbool32_t vm, vuint32m1_t vs2, size_t vl) { + return __riscv_vclz(vm, vs2, vl); +} + +vuint32m2_t test_vclz_v_u32m2_m(vbool16_t vm, vuint32m2_t vs2, size_t vl) { + return __riscv_vclz(vm, vs2, vl); +} + +vuint32m4_t test_vclz_v_u32m4_m(vbool8_t vm, vuint32m4_t vs2, size_t vl) { + return __riscv_vclz(vm, vs2, vl); +} + +vuint32m8_t test_vclz_v_u32m8_m(vbool4_t vm, vuint32m8_t vs2, size_t vl) { + return __riscv_vclz(vm, vs2, vl); +} + +vuint64m1_t test_vclz_v_u64m1_m(vbool64_t vm, vuint64m1_t vs2, size_t vl) { + return __riscv_vclz(vm, vs2, vl); +} + +vuint64m2_t test_vclz_v_u64m2_m(vbool32_t vm, vuint64m2_t vs2, size_t vl) { + return __riscv_vclz(vm, vs2, vl); +} + +vuint64m4_t test_vclz_v_u64m4_m(vbool16_t vm, vuint64m4_t vs2, size_t vl) { + return __riscv_vclz(vm, vs2, vl); +} + +vuint64m8_t test_vclz_v_u64m8_m(vbool8_t vm, vuint64m8_t vs2, size_t vl) { + return __riscv_vclz(vm, vs2, vl); +} diff --git a/auto-generated/vector-crypto/llvm-overloaded-tests/vcpop.c b/auto-generated/vector-crypto/llvm-overloaded-tests/vcpop.c new file mode 100644 index 000000000..aee4aff80 --- /dev/null +++ b/auto-generated/vector-crypto/llvm-overloaded-tests/vcpop.c @@ -0,0 +1,182 @@ +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include + +vuint8mf8_t test_vcpop_v_u8mf8(vuint8mf8_t vs2, size_t vl) { + return __riscv_vcpop(vs2, vl); +} + +vuint8mf4_t test_vcpop_v_u8mf4(vuint8mf4_t vs2, size_t vl) { + return __riscv_vcpop(vs2, vl); +} + +vuint8mf2_t test_vcpop_v_u8mf2(vuint8mf2_t vs2, size_t vl) { + return __riscv_vcpop(vs2, vl); +} + +vuint8m1_t test_vcpop_v_u8m1(vuint8m1_t vs2, size_t vl) { + return __riscv_vcpop(vs2, vl); +} + +vuint8m2_t test_vcpop_v_u8m2(vuint8m2_t vs2, size_t vl) { + return __riscv_vcpop(vs2, vl); +} + +vuint8m4_t test_vcpop_v_u8m4(vuint8m4_t vs2, size_t vl) { + return __riscv_vcpop(vs2, vl); +} + +vuint8m8_t test_vcpop_v_u8m8(vuint8m8_t vs2, size_t vl) { + return __riscv_vcpop(vs2, vl); +} + +vuint16mf4_t 
test_vcpop_v_u16mf4(vuint16mf4_t vs2, size_t vl) { + return __riscv_vcpop(vs2, vl); +} + +vuint16mf2_t test_vcpop_v_u16mf2(vuint16mf2_t vs2, size_t vl) { + return __riscv_vcpop(vs2, vl); +} + +vuint16m1_t test_vcpop_v_u16m1(vuint16m1_t vs2, size_t vl) { + return __riscv_vcpop(vs2, vl); +} + +vuint16m2_t test_vcpop_v_u16m2(vuint16m2_t vs2, size_t vl) { + return __riscv_vcpop(vs2, vl); +} + +vuint16m4_t test_vcpop_v_u16m4(vuint16m4_t vs2, size_t vl) { + return __riscv_vcpop(vs2, vl); +} + +vuint16m8_t test_vcpop_v_u16m8(vuint16m8_t vs2, size_t vl) { + return __riscv_vcpop(vs2, vl); +} + +vuint32mf2_t test_vcpop_v_u32mf2(vuint32mf2_t vs2, size_t vl) { + return __riscv_vcpop(vs2, vl); +} + +vuint32m1_t test_vcpop_v_u32m1(vuint32m1_t vs2, size_t vl) { + return __riscv_vcpop(vs2, vl); +} + +vuint32m2_t test_vcpop_v_u32m2(vuint32m2_t vs2, size_t vl) { + return __riscv_vcpop(vs2, vl); +} + +vuint32m4_t test_vcpop_v_u32m4(vuint32m4_t vs2, size_t vl) { + return __riscv_vcpop(vs2, vl); +} + +vuint32m8_t test_vcpop_v_u32m8(vuint32m8_t vs2, size_t vl) { + return __riscv_vcpop(vs2, vl); +} + +vuint64m1_t test_vcpop_v_u64m1(vuint64m1_t vs2, size_t vl) { + return __riscv_vcpop(vs2, vl); +} + +vuint64m2_t test_vcpop_v_u64m2(vuint64m2_t vs2, size_t vl) { + return __riscv_vcpop(vs2, vl); +} + +vuint64m4_t test_vcpop_v_u64m4(vuint64m4_t vs2, size_t vl) { + return __riscv_vcpop(vs2, vl); +} + +vuint64m8_t test_vcpop_v_u64m8(vuint64m8_t vs2, size_t vl) { + return __riscv_vcpop(vs2, vl); +} + +vuint8mf8_t test_vcpop_v_u8mf8_m(vbool64_t vm, vuint8mf8_t vs2, size_t vl) { + return __riscv_vcpop(vm, vs2, vl); +} + +vuint8mf4_t test_vcpop_v_u8mf4_m(vbool32_t vm, vuint8mf4_t vs2, size_t vl) { + return __riscv_vcpop(vm, vs2, vl); +} + +vuint8mf2_t test_vcpop_v_u8mf2_m(vbool16_t vm, vuint8mf2_t vs2, size_t vl) { + return __riscv_vcpop(vm, vs2, vl); +} + +vuint8m1_t test_vcpop_v_u8m1_m(vbool8_t vm, vuint8m1_t vs2, size_t vl) { + return __riscv_vcpop(vm, vs2, vl); +} + +vuint8m2_t test_vcpop_v_u8m2_m(vbool4_t vm, vuint8m2_t vs2, size_t vl) { + return __riscv_vcpop(vm, vs2, vl); +} + +vuint8m4_t test_vcpop_v_u8m4_m(vbool2_t vm, vuint8m4_t vs2, size_t vl) { + return __riscv_vcpop(vm, vs2, vl); +} + +vuint8m8_t test_vcpop_v_u8m8_m(vbool1_t vm, vuint8m8_t vs2, size_t vl) { + return __riscv_vcpop(vm, vs2, vl); +} + +vuint16mf4_t test_vcpop_v_u16mf4_m(vbool64_t vm, vuint16mf4_t vs2, size_t vl) { + return __riscv_vcpop(vm, vs2, vl); +} + +vuint16mf2_t test_vcpop_v_u16mf2_m(vbool32_t vm, vuint16mf2_t vs2, size_t vl) { + return __riscv_vcpop(vm, vs2, vl); +} + +vuint16m1_t test_vcpop_v_u16m1_m(vbool16_t vm, vuint16m1_t vs2, size_t vl) { + return __riscv_vcpop(vm, vs2, vl); +} + +vuint16m2_t test_vcpop_v_u16m2_m(vbool8_t vm, vuint16m2_t vs2, size_t vl) { + return __riscv_vcpop(vm, vs2, vl); +} + +vuint16m4_t test_vcpop_v_u16m4_m(vbool4_t vm, vuint16m4_t vs2, size_t vl) { + return __riscv_vcpop(vm, vs2, vl); +} + +vuint16m8_t test_vcpop_v_u16m8_m(vbool2_t vm, vuint16m8_t vs2, size_t vl) { + return __riscv_vcpop(vm, vs2, vl); +} + +vuint32mf2_t test_vcpop_v_u32mf2_m(vbool64_t vm, vuint32mf2_t vs2, size_t vl) { + return __riscv_vcpop(vm, vs2, vl); +} + +vuint32m1_t test_vcpop_v_u32m1_m(vbool32_t vm, vuint32m1_t vs2, size_t vl) { + return __riscv_vcpop(vm, vs2, vl); +} + +vuint32m2_t test_vcpop_v_u32m2_m(vbool16_t vm, vuint32m2_t vs2, size_t vl) { + return __riscv_vcpop(vm, vs2, vl); +} + +vuint32m4_t test_vcpop_v_u32m4_m(vbool8_t vm, vuint32m4_t vs2, size_t vl) { + return __riscv_vcpop(vm, vs2, vl); +} + +vuint32m8_t 
test_vcpop_v_u32m8_m(vbool4_t vm, vuint32m8_t vs2, size_t vl) { + return __riscv_vcpop(vm, vs2, vl); +} + +vuint64m1_t test_vcpop_v_u64m1_m(vbool64_t vm, vuint64m1_t vs2, size_t vl) { + return __riscv_vcpop(vm, vs2, vl); +} + +vuint64m2_t test_vcpop_v_u64m2_m(vbool32_t vm, vuint64m2_t vs2, size_t vl) { + return __riscv_vcpop(vm, vs2, vl); +} + +vuint64m4_t test_vcpop_v_u64m4_m(vbool16_t vm, vuint64m4_t vs2, size_t vl) { + return __riscv_vcpop(vm, vs2, vl); +} + +vuint64m8_t test_vcpop_v_u64m8_m(vbool8_t vm, vuint64m8_t vs2, size_t vl) { + return __riscv_vcpop(vm, vs2, vl); +} diff --git a/auto-generated/vector-crypto/llvm-overloaded-tests/vctz.c b/auto-generated/vector-crypto/llvm-overloaded-tests/vctz.c new file mode 100644 index 000000000..10223ef94 --- /dev/null +++ b/auto-generated/vector-crypto/llvm-overloaded-tests/vctz.c @@ -0,0 +1,189 @@ +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zvl512b \ +// RUN: -target-feature +zvbb \ +// RUN: -target-feature +zvbc \ +// RUN: -target-feature +zvkg \ +// RUN: -target-feature +zvkned \ +// RUN: -target-feature +zvknhb \ +// RUN: -target-feature +zvksed \ +// RUN: -target-feature +zvksh -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include + +vuint8mf8_t test_vctz_v_u8mf8(vuint8mf8_t vs2, size_t vl) { + return __riscv_vctz(vs2, vl); +} + +vuint8mf4_t test_vctz_v_u8mf4(vuint8mf4_t vs2, size_t vl) { + return __riscv_vctz(vs2, vl); +} + +vuint8mf2_t test_vctz_v_u8mf2(vuint8mf2_t vs2, size_t vl) { + return __riscv_vctz(vs2, vl); +} + +vuint8m1_t test_vctz_v_u8m1(vuint8m1_t vs2, size_t vl) { + return __riscv_vctz(vs2, vl); +} + +vuint8m2_t test_vctz_v_u8m2(vuint8m2_t vs2, size_t vl) { + return __riscv_vctz(vs2, vl); +} + +vuint8m4_t test_vctz_v_u8m4(vuint8m4_t vs2, size_t vl) { + return __riscv_vctz(vs2, vl); +} + +vuint8m8_t test_vctz_v_u8m8(vuint8m8_t vs2, size_t vl) { + return __riscv_vctz(vs2, vl); +} + +vuint16mf4_t test_vctz_v_u16mf4(vuint16mf4_t vs2, size_t vl) { + return __riscv_vctz(vs2, vl); +} + +vuint16mf2_t test_vctz_v_u16mf2(vuint16mf2_t vs2, size_t vl) { + return __riscv_vctz(vs2, vl); +} + +vuint16m1_t test_vctz_v_u16m1(vuint16m1_t vs2, size_t vl) { + return __riscv_vctz(vs2, vl); +} + +vuint16m2_t test_vctz_v_u16m2(vuint16m2_t vs2, size_t vl) { + return __riscv_vctz(vs2, vl); +} + +vuint16m4_t test_vctz_v_u16m4(vuint16m4_t vs2, size_t vl) { + return __riscv_vctz(vs2, vl); +} + +vuint16m8_t test_vctz_v_u16m8(vuint16m8_t vs2, size_t vl) { + return __riscv_vctz(vs2, vl); +} + +vuint32mf2_t test_vctz_v_u32mf2(vuint32mf2_t vs2, size_t vl) { + return __riscv_vctz(vs2, vl); +} + +vuint32m1_t test_vctz_v_u32m1(vuint32m1_t vs2, size_t vl) { + return __riscv_vctz(vs2, vl); +} + +vuint32m2_t test_vctz_v_u32m2(vuint32m2_t vs2, size_t vl) { + return __riscv_vctz(vs2, vl); +} + +vuint32m4_t test_vctz_v_u32m4(vuint32m4_t vs2, size_t vl) { + return __riscv_vctz(vs2, vl); +} + +vuint32m8_t test_vctz_v_u32m8(vuint32m8_t vs2, size_t vl) { + return __riscv_vctz(vs2, vl); +} + +vuint64m1_t test_vctz_v_u64m1(vuint64m1_t vs2, size_t vl) { + return __riscv_vctz(vs2, vl); +} + +vuint64m2_t test_vctz_v_u64m2(vuint64m2_t vs2, size_t vl) { + return __riscv_vctz(vs2, vl); +} + +vuint64m4_t test_vctz_v_u64m4(vuint64m4_t vs2, size_t vl) { + return __riscv_vctz(vs2, vl); +} + +vuint64m8_t test_vctz_v_u64m8(vuint64m8_t vs2, size_t vl) { + return __riscv_vctz(vs2, vl); +} + +vuint8mf8_t test_vctz_v_u8mf8_m(vbool64_t vm, 
vuint8mf8_t vs2, size_t vl) { + return __riscv_vctz(vm, vs2, vl); +} + +vuint8mf4_t test_vctz_v_u8mf4_m(vbool32_t vm, vuint8mf4_t vs2, size_t vl) { + return __riscv_vctz(vm, vs2, vl); +} + +vuint8mf2_t test_vctz_v_u8mf2_m(vbool16_t vm, vuint8mf2_t vs2, size_t vl) { + return __riscv_vctz(vm, vs2, vl); +} + +vuint8m1_t test_vctz_v_u8m1_m(vbool8_t vm, vuint8m1_t vs2, size_t vl) { + return __riscv_vctz(vm, vs2, vl); +} + +vuint8m2_t test_vctz_v_u8m2_m(vbool4_t vm, vuint8m2_t vs2, size_t vl) { + return __riscv_vctz(vm, vs2, vl); +} + +vuint8m4_t test_vctz_v_u8m4_m(vbool2_t vm, vuint8m4_t vs2, size_t vl) { + return __riscv_vctz(vm, vs2, vl); +} + +vuint8m8_t test_vctz_v_u8m8_m(vbool1_t vm, vuint8m8_t vs2, size_t vl) { + return __riscv_vctz(vm, vs2, vl); +} + +vuint16mf4_t test_vctz_v_u16mf4_m(vbool64_t vm, vuint16mf4_t vs2, size_t vl) { + return __riscv_vctz(vm, vs2, vl); +} + +vuint16mf2_t test_vctz_v_u16mf2_m(vbool32_t vm, vuint16mf2_t vs2, size_t vl) { + return __riscv_vctz(vm, vs2, vl); +} + +vuint16m1_t test_vctz_v_u16m1_m(vbool16_t vm, vuint16m1_t vs2, size_t vl) { + return __riscv_vctz(vm, vs2, vl); +} + +vuint16m2_t test_vctz_v_u16m2_m(vbool8_t vm, vuint16m2_t vs2, size_t vl) { + return __riscv_vctz(vm, vs2, vl); +} + +vuint16m4_t test_vctz_v_u16m4_m(vbool4_t vm, vuint16m4_t vs2, size_t vl) { + return __riscv_vctz(vm, vs2, vl); +} + +vuint16m8_t test_vctz_v_u16m8_m(vbool2_t vm, vuint16m8_t vs2, size_t vl) { + return __riscv_vctz(vm, vs2, vl); +} + +vuint32mf2_t test_vctz_v_u32mf2_m(vbool64_t vm, vuint32mf2_t vs2, size_t vl) { + return __riscv_vctz(vm, vs2, vl); +} + +vuint32m1_t test_vctz_v_u32m1_m(vbool32_t vm, vuint32m1_t vs2, size_t vl) { + return __riscv_vctz(vm, vs2, vl); +} + +vuint32m2_t test_vctz_v_u32m2_m(vbool16_t vm, vuint32m2_t vs2, size_t vl) { + return __riscv_vctz(vm, vs2, vl); +} + +vuint32m4_t test_vctz_v_u32m4_m(vbool8_t vm, vuint32m4_t vs2, size_t vl) { + return __riscv_vctz(vm, vs2, vl); +} + +vuint32m8_t test_vctz_v_u32m8_m(vbool4_t vm, vuint32m8_t vs2, size_t vl) { + return __riscv_vctz(vm, vs2, vl); +} + +vuint64m1_t test_vctz_v_u64m1_m(vbool64_t vm, vuint64m1_t vs2, size_t vl) { + return __riscv_vctz(vm, vs2, vl); +} + +vuint64m2_t test_vctz_v_u64m2_m(vbool32_t vm, vuint64m2_t vs2, size_t vl) { + return __riscv_vctz(vm, vs2, vl); +} + +vuint64m4_t test_vctz_v_u64m4_m(vbool16_t vm, vuint64m4_t vs2, size_t vl) { + return __riscv_vctz(vm, vs2, vl); +} + +vuint64m8_t test_vctz_v_u64m8_m(vbool8_t vm, vuint64m8_t vs2, size_t vl) { + return __riscv_vctz(vm, vs2, vl); +} diff --git a/auto-generated/vector-crypto/llvm-overloaded-tests/vghsh.c b/auto-generated/vector-crypto/llvm-overloaded-tests/vghsh.c new file mode 100644 index 000000000..bd18a2c0e --- /dev/null +++ b/auto-generated/vector-crypto/llvm-overloaded-tests/vghsh.c @@ -0,0 +1,38 @@ +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zvl512b \ +// RUN: -target-feature +zvbb \ +// RUN: -target-feature +zvbc \ +// RUN: -target-feature +zvkg \ +// RUN: -target-feature +zvkned \ +// RUN: -target-feature +zvknhb \ +// RUN: -target-feature +zvksed \ +// RUN: -target-feature +zvksh -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include + +vuint32mf2_t test_vghsh_vv_u32mf2(vuint32mf2_t vd, vuint32mf2_t vs2, + vuint32mf2_t vs1, size_t vl) { + return __riscv_vghsh(vd, vs2, vs1, vl); +} + +vuint32m1_t test_vghsh_vv_u32m1(vuint32m1_t vd, vuint32m1_t vs2, + vuint32m1_t vs1, size_t vl) 
{ + return __riscv_vghsh(vd, vs2, vs1, vl); +} + +vuint32m2_t test_vghsh_vv_u32m2(vuint32m2_t vd, vuint32m2_t vs2, + vuint32m2_t vs1, size_t vl) { + return __riscv_vghsh(vd, vs2, vs1, vl); +} + +vuint32m4_t test_vghsh_vv_u32m4(vuint32m4_t vd, vuint32m4_t vs2, + vuint32m4_t vs1, size_t vl) { + return __riscv_vghsh(vd, vs2, vs1, vl); +} + +vuint32m8_t test_vghsh_vv_u32m8(vuint32m8_t vd, vuint32m8_t vs2, + vuint32m8_t vs1, size_t vl) { + return __riscv_vghsh(vd, vs2, vs1, vl); +} diff --git a/auto-generated/vector-crypto/llvm-overloaded-tests/vgmul.c b/auto-generated/vector-crypto/llvm-overloaded-tests/vgmul.c new file mode 100644 index 000000000..ed81badec --- /dev/null +++ b/auto-generated/vector-crypto/llvm-overloaded-tests/vgmul.c @@ -0,0 +1,34 @@ +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zvl512b \ +// RUN: -target-feature +zvbb \ +// RUN: -target-feature +zvbc \ +// RUN: -target-feature +zvkg \ +// RUN: -target-feature +zvkned \ +// RUN: -target-feature +zvknhb \ +// RUN: -target-feature +zvksed \ +// RUN: -target-feature +zvksh -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include + +vuint32mf2_t test_vgmul_vv_u32mf2(vuint32mf2_t vd, vuint32mf2_t vs2, + size_t vl) { + return __riscv_vgmul(vd, vs2, vl); +} + +vuint32m1_t test_vgmul_vv_u32m1(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vgmul(vd, vs2, vl); +} + +vuint32m2_t test_vgmul_vv_u32m2(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vgmul(vd, vs2, vl); +} + +vuint32m4_t test_vgmul_vv_u32m4(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vgmul(vd, vs2, vl); +} + +vuint32m8_t test_vgmul_vv_u32m8(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { + return __riscv_vgmul(vd, vs2, vl); +} diff --git a/auto-generated/vector-crypto/llvm-overloaded-tests/vrev8.c b/auto-generated/vector-crypto/llvm-overloaded-tests/vrev8.c new file mode 100644 index 000000000..6f491581c --- /dev/null +++ b/auto-generated/vector-crypto/llvm-overloaded-tests/vrev8.c @@ -0,0 +1,189 @@ +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zvl512b \ +// RUN: -target-feature +zvbb \ +// RUN: -target-feature +zvbc \ +// RUN: -target-feature +zvkg \ +// RUN: -target-feature +zvkned \ +// RUN: -target-feature +zvknhb \ +// RUN: -target-feature +zvksed \ +// RUN: -target-feature +zvksh -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include + +vuint8mf8_t test_vrev8_v_u8mf8(vuint8mf8_t vs2, size_t vl) { + return __riscv_vrev8(vs2, vl); +} + +vuint8mf4_t test_vrev8_v_u8mf4(vuint8mf4_t vs2, size_t vl) { + return __riscv_vrev8(vs2, vl); +} + +vuint8mf2_t test_vrev8_v_u8mf2(vuint8mf2_t vs2, size_t vl) { + return __riscv_vrev8(vs2, vl); +} + +vuint8m1_t test_vrev8_v_u8m1(vuint8m1_t vs2, size_t vl) { + return __riscv_vrev8(vs2, vl); +} + +vuint8m2_t test_vrev8_v_u8m2(vuint8m2_t vs2, size_t vl) { + return __riscv_vrev8(vs2, vl); +} + +vuint8m4_t test_vrev8_v_u8m4(vuint8m4_t vs2, size_t vl) { + return __riscv_vrev8(vs2, vl); +} + +vuint8m8_t test_vrev8_v_u8m8(vuint8m8_t vs2, size_t vl) { + return __riscv_vrev8(vs2, vl); +} + +vuint16mf4_t test_vrev8_v_u16mf4(vuint16mf4_t vs2, size_t vl) { + return __riscv_vrev8(vs2, vl); +} + +vuint16mf2_t test_vrev8_v_u16mf2(vuint16mf2_t vs2, size_t vl) { + return __riscv_vrev8(vs2, vl); +} + +vuint16m1_t 
test_vrev8_v_u16m1(vuint16m1_t vs2, size_t vl) { + return __riscv_vrev8(vs2, vl); +} + +vuint16m2_t test_vrev8_v_u16m2(vuint16m2_t vs2, size_t vl) { + return __riscv_vrev8(vs2, vl); +} + +vuint16m4_t test_vrev8_v_u16m4(vuint16m4_t vs2, size_t vl) { + return __riscv_vrev8(vs2, vl); +} + +vuint16m8_t test_vrev8_v_u16m8(vuint16m8_t vs2, size_t vl) { + return __riscv_vrev8(vs2, vl); +} + +vuint32mf2_t test_vrev8_v_u32mf2(vuint32mf2_t vs2, size_t vl) { + return __riscv_vrev8(vs2, vl); +} + +vuint32m1_t test_vrev8_v_u32m1(vuint32m1_t vs2, size_t vl) { + return __riscv_vrev8(vs2, vl); +} + +vuint32m2_t test_vrev8_v_u32m2(vuint32m2_t vs2, size_t vl) { + return __riscv_vrev8(vs2, vl); +} + +vuint32m4_t test_vrev8_v_u32m4(vuint32m4_t vs2, size_t vl) { + return __riscv_vrev8(vs2, vl); +} + +vuint32m8_t test_vrev8_v_u32m8(vuint32m8_t vs2, size_t vl) { + return __riscv_vrev8(vs2, vl); +} + +vuint64m1_t test_vrev8_v_u64m1(vuint64m1_t vs2, size_t vl) { + return __riscv_vrev8(vs2, vl); +} + +vuint64m2_t test_vrev8_v_u64m2(vuint64m2_t vs2, size_t vl) { + return __riscv_vrev8(vs2, vl); +} + +vuint64m4_t test_vrev8_v_u64m4(vuint64m4_t vs2, size_t vl) { + return __riscv_vrev8(vs2, vl); +} + +vuint64m8_t test_vrev8_v_u64m8(vuint64m8_t vs2, size_t vl) { + return __riscv_vrev8(vs2, vl); +} + +vuint8mf8_t test_vrev8_v_u8mf8_m(vbool64_t vm, vuint8mf8_t vs2, size_t vl) { + return __riscv_vrev8(vm, vs2, vl); +} + +vuint8mf4_t test_vrev8_v_u8mf4_m(vbool32_t vm, vuint8mf4_t vs2, size_t vl) { + return __riscv_vrev8(vm, vs2, vl); +} + +vuint8mf2_t test_vrev8_v_u8mf2_m(vbool16_t vm, vuint8mf2_t vs2, size_t vl) { + return __riscv_vrev8(vm, vs2, vl); +} + +vuint8m1_t test_vrev8_v_u8m1_m(vbool8_t vm, vuint8m1_t vs2, size_t vl) { + return __riscv_vrev8(vm, vs2, vl); +} + +vuint8m2_t test_vrev8_v_u8m2_m(vbool4_t vm, vuint8m2_t vs2, size_t vl) { + return __riscv_vrev8(vm, vs2, vl); +} + +vuint8m4_t test_vrev8_v_u8m4_m(vbool2_t vm, vuint8m4_t vs2, size_t vl) { + return __riscv_vrev8(vm, vs2, vl); +} + +vuint8m8_t test_vrev8_v_u8m8_m(vbool1_t vm, vuint8m8_t vs2, size_t vl) { + return __riscv_vrev8(vm, vs2, vl); +} + +vuint16mf4_t test_vrev8_v_u16mf4_m(vbool64_t vm, vuint16mf4_t vs2, size_t vl) { + return __riscv_vrev8(vm, vs2, vl); +} + +vuint16mf2_t test_vrev8_v_u16mf2_m(vbool32_t vm, vuint16mf2_t vs2, size_t vl) { + return __riscv_vrev8(vm, vs2, vl); +} + +vuint16m1_t test_vrev8_v_u16m1_m(vbool16_t vm, vuint16m1_t vs2, size_t vl) { + return __riscv_vrev8(vm, vs2, vl); +} + +vuint16m2_t test_vrev8_v_u16m2_m(vbool8_t vm, vuint16m2_t vs2, size_t vl) { + return __riscv_vrev8(vm, vs2, vl); +} + +vuint16m4_t test_vrev8_v_u16m4_m(vbool4_t vm, vuint16m4_t vs2, size_t vl) { + return __riscv_vrev8(vm, vs2, vl); +} + +vuint16m8_t test_vrev8_v_u16m8_m(vbool2_t vm, vuint16m8_t vs2, size_t vl) { + return __riscv_vrev8(vm, vs2, vl); +} + +vuint32mf2_t test_vrev8_v_u32mf2_m(vbool64_t vm, vuint32mf2_t vs2, size_t vl) { + return __riscv_vrev8(vm, vs2, vl); +} + +vuint32m1_t test_vrev8_v_u32m1_m(vbool32_t vm, vuint32m1_t vs2, size_t vl) { + return __riscv_vrev8(vm, vs2, vl); +} + +vuint32m2_t test_vrev8_v_u32m2_m(vbool16_t vm, vuint32m2_t vs2, size_t vl) { + return __riscv_vrev8(vm, vs2, vl); +} + +vuint32m4_t test_vrev8_v_u32m4_m(vbool8_t vm, vuint32m4_t vs2, size_t vl) { + return __riscv_vrev8(vm, vs2, vl); +} + +vuint32m8_t test_vrev8_v_u32m8_m(vbool4_t vm, vuint32m8_t vs2, size_t vl) { + return __riscv_vrev8(vm, vs2, vl); +} + +vuint64m1_t test_vrev8_v_u64m1_m(vbool64_t vm, vuint64m1_t vs2, size_t vl) { + return __riscv_vrev8(vm, vs2, vl); 
+} + +vuint64m2_t test_vrev8_v_u64m2_m(vbool32_t vm, vuint64m2_t vs2, size_t vl) { + return __riscv_vrev8(vm, vs2, vl); +} + +vuint64m4_t test_vrev8_v_u64m4_m(vbool16_t vm, vuint64m4_t vs2, size_t vl) { + return __riscv_vrev8(vm, vs2, vl); +} + +vuint64m8_t test_vrev8_v_u64m8_m(vbool8_t vm, vuint64m8_t vs2, size_t vl) { + return __riscv_vrev8(vm, vs2, vl); +} diff --git a/auto-generated/vector-crypto/llvm-overloaded-tests/vrol.c b/auto-generated/vector-crypto/llvm-overloaded-tests/vrol.c new file mode 100644 index 000000000..2e24afa14 --- /dev/null +++ b/auto-generated/vector-crypto/llvm-overloaded-tests/vrol.c @@ -0,0 +1,412 @@ +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zvl512b \ +// RUN: -target-feature +zvbb \ +// RUN: -target-feature +zvbc \ +// RUN: -target-feature +zvkg \ +// RUN: -target-feature +zvkned \ +// RUN: -target-feature +zvknhb \ +// RUN: -target-feature +zvksed \ +// RUN: -target-feature +zvksh -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include + +vuint8mf8_t test_vrol_vv_u8mf8(vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { + return __riscv_vrol(vs2, vs1, vl); +} + +vuint8mf8_t test_vrol_vx_u8mf8(vuint8mf8_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol(vs2, rs1, vl); +} + +vuint8mf4_t test_vrol_vv_u8mf4(vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { + return __riscv_vrol(vs2, vs1, vl); +} + +vuint8mf4_t test_vrol_vx_u8mf4(vuint8mf4_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol(vs2, rs1, vl); +} + +vuint8mf2_t test_vrol_vv_u8mf2(vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { + return __riscv_vrol(vs2, vs1, vl); +} + +vuint8mf2_t test_vrol_vx_u8mf2(vuint8mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol(vs2, rs1, vl); +} + +vuint8m1_t test_vrol_vv_u8m1(vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vrol(vs2, vs1, vl); +} + +vuint8m1_t test_vrol_vx_u8m1(vuint8m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol(vs2, rs1, vl); +} + +vuint8m2_t test_vrol_vv_u8m2(vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) { + return __riscv_vrol(vs2, vs1, vl); +} + +vuint8m2_t test_vrol_vx_u8m2(vuint8m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol(vs2, rs1, vl); +} + +vuint8m4_t test_vrol_vv_u8m4(vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) { + return __riscv_vrol(vs2, vs1, vl); +} + +vuint8m4_t test_vrol_vx_u8m4(vuint8m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol(vs2, rs1, vl); +} + +vuint8m8_t test_vrol_vv_u8m8(vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) { + return __riscv_vrol(vs2, vs1, vl); +} + +vuint8m8_t test_vrol_vx_u8m8(vuint8m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol(vs2, rs1, vl); +} + +vuint16mf4_t test_vrol_vv_u16mf4(vuint16mf4_t vs2, vuint16mf4_t vs1, + size_t vl) { + return __riscv_vrol(vs2, vs1, vl); +} + +vuint16mf4_t test_vrol_vx_u16mf4(vuint16mf4_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol(vs2, rs1, vl); +} + +vuint16mf2_t test_vrol_vv_u16mf2(vuint16mf2_t vs2, vuint16mf2_t vs1, + size_t vl) { + return __riscv_vrol(vs2, vs1, vl); +} + +vuint16mf2_t test_vrol_vx_u16mf2(vuint16mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol(vs2, rs1, vl); +} + +vuint16m1_t test_vrol_vv_u16m1(vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vrol(vs2, vs1, vl); +} + +vuint16m1_t test_vrol_vx_u16m1(vuint16m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol(vs2, rs1, vl); +} + +vuint16m2_t test_vrol_vv_u16m2(vuint16m2_t vs2, vuint16m2_t 
vs1, size_t vl) { + return __riscv_vrol(vs2, vs1, vl); +} + +vuint16m2_t test_vrol_vx_u16m2(vuint16m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol(vs2, rs1, vl); +} + +vuint16m4_t test_vrol_vv_u16m4(vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { + return __riscv_vrol(vs2, vs1, vl); +} + +vuint16m4_t test_vrol_vx_u16m4(vuint16m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol(vs2, rs1, vl); +} + +vuint16m8_t test_vrol_vv_u16m8(vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) { + return __riscv_vrol(vs2, vs1, vl); +} + +vuint16m8_t test_vrol_vx_u16m8(vuint16m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol(vs2, rs1, vl); +} + +vuint32mf2_t test_vrol_vv_u32mf2(vuint32mf2_t vs2, vuint32mf2_t vs1, + size_t vl) { + return __riscv_vrol(vs2, vs1, vl); +} + +vuint32mf2_t test_vrol_vx_u32mf2(vuint32mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol(vs2, rs1, vl); +} + +vuint32m1_t test_vrol_vv_u32m1(vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vrol(vs2, vs1, vl); +} + +vuint32m1_t test_vrol_vx_u32m1(vuint32m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol(vs2, rs1, vl); +} + +vuint32m2_t test_vrol_vv_u32m2(vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { + return __riscv_vrol(vs2, vs1, vl); +} + +vuint32m2_t test_vrol_vx_u32m2(vuint32m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol(vs2, rs1, vl); +} + +vuint32m4_t test_vrol_vv_u32m4(vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { + return __riscv_vrol(vs2, vs1, vl); +} + +vuint32m4_t test_vrol_vx_u32m4(vuint32m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol(vs2, rs1, vl); +} + +vuint32m8_t test_vrol_vv_u32m8(vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) { + return __riscv_vrol(vs2, vs1, vl); +} + +vuint32m8_t test_vrol_vx_u32m8(vuint32m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol(vs2, rs1, vl); +} + +vuint64m1_t test_vrol_vv_u64m1(vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vrol(vs2, vs1, vl); +} + +vuint64m1_t test_vrol_vx_u64m1(vuint64m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol(vs2, rs1, vl); +} + +vuint64m2_t test_vrol_vv_u64m2(vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { + return __riscv_vrol(vs2, vs1, vl); +} + +vuint64m2_t test_vrol_vx_u64m2(vuint64m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol(vs2, rs1, vl); +} + +vuint64m4_t test_vrol_vv_u64m4(vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { + return __riscv_vrol(vs2, vs1, vl); +} + +vuint64m4_t test_vrol_vx_u64m4(vuint64m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol(vs2, rs1, vl); +} + +vuint64m8_t test_vrol_vv_u64m8(vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { + return __riscv_vrol(vs2, vs1, vl); +} + +vuint64m8_t test_vrol_vx_u64m8(vuint64m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol(vs2, rs1, vl); +} + +vuint8mf8_t test_vrol_vv_u8mf8_m(vbool64_t vm, vuint8mf8_t vs2, vuint8mf8_t vs1, + size_t vl) { + return __riscv_vrol(vm, vs2, vs1, vl); +} + +vuint8mf8_t test_vrol_vx_u8mf8_m(vbool64_t vm, vuint8mf8_t vs2, size_t rs1, + size_t vl) { + return __riscv_vrol(vm, vs2, rs1, vl); +} + +vuint8mf4_t test_vrol_vv_u8mf4_m(vbool32_t vm, vuint8mf4_t vs2, vuint8mf4_t vs1, + size_t vl) { + return __riscv_vrol(vm, vs2, vs1, vl); +} + +vuint8mf4_t test_vrol_vx_u8mf4_m(vbool32_t vm, vuint8mf4_t vs2, size_t rs1, + size_t vl) { + return __riscv_vrol(vm, vs2, rs1, vl); +} + +vuint8mf2_t test_vrol_vv_u8mf2_m(vbool16_t vm, vuint8mf2_t vs2, vuint8mf2_t vs1, + size_t vl) { + return __riscv_vrol(vm, vs2, vs1, vl); +} + +vuint8mf2_t test_vrol_vx_u8mf2_m(vbool16_t vm, 
vuint8mf2_t vs2, size_t rs1, + size_t vl) { + return __riscv_vrol(vm, vs2, rs1, vl); +} + +vuint8m1_t test_vrol_vv_u8m1_m(vbool8_t vm, vuint8m1_t vs2, vuint8m1_t vs1, + size_t vl) { + return __riscv_vrol(vm, vs2, vs1, vl); +} + +vuint8m1_t test_vrol_vx_u8m1_m(vbool8_t vm, vuint8m1_t vs2, size_t rs1, + size_t vl) { + return __riscv_vrol(vm, vs2, rs1, vl); +} + +vuint8m2_t test_vrol_vv_u8m2_m(vbool4_t vm, vuint8m2_t vs2, vuint8m2_t vs1, + size_t vl) { + return __riscv_vrol(vm, vs2, vs1, vl); +} + +vuint8m2_t test_vrol_vx_u8m2_m(vbool4_t vm, vuint8m2_t vs2, size_t rs1, + size_t vl) { + return __riscv_vrol(vm, vs2, rs1, vl); +} + +vuint8m4_t test_vrol_vv_u8m4_m(vbool2_t vm, vuint8m4_t vs2, vuint8m4_t vs1, + size_t vl) { + return __riscv_vrol(vm, vs2, vs1, vl); +} + +vuint8m4_t test_vrol_vx_u8m4_m(vbool2_t vm, vuint8m4_t vs2, size_t rs1, + size_t vl) { + return __riscv_vrol(vm, vs2, rs1, vl); +} + +vuint8m8_t test_vrol_vv_u8m8_m(vbool1_t vm, vuint8m8_t vs2, vuint8m8_t vs1, + size_t vl) { + return __riscv_vrol(vm, vs2, vs1, vl); +} + +vuint8m8_t test_vrol_vx_u8m8_m(vbool1_t vm, vuint8m8_t vs2, size_t rs1, + size_t vl) { + return __riscv_vrol(vm, vs2, rs1, vl); +} + +vuint16mf4_t test_vrol_vv_u16mf4_m(vbool64_t vm, vuint16mf4_t vs2, + vuint16mf4_t vs1, size_t vl) { + return __riscv_vrol(vm, vs2, vs1, vl); +} + +vuint16mf4_t test_vrol_vx_u16mf4_m(vbool64_t vm, vuint16mf4_t vs2, size_t rs1, + size_t vl) { + return __riscv_vrol(vm, vs2, rs1, vl); +} + +vuint16mf2_t test_vrol_vv_u16mf2_m(vbool32_t vm, vuint16mf2_t vs2, + vuint16mf2_t vs1, size_t vl) { + return __riscv_vrol(vm, vs2, vs1, vl); +} + +vuint16mf2_t test_vrol_vx_u16mf2_m(vbool32_t vm, vuint16mf2_t vs2, size_t rs1, + size_t vl) { + return __riscv_vrol(vm, vs2, rs1, vl); +} + +vuint16m1_t test_vrol_vv_u16m1_m(vbool16_t vm, vuint16m1_t vs2, vuint16m1_t vs1, + size_t vl) { + return __riscv_vrol(vm, vs2, vs1, vl); +} + +vuint16m1_t test_vrol_vx_u16m1_m(vbool16_t vm, vuint16m1_t vs2, size_t rs1, + size_t vl) { + return __riscv_vrol(vm, vs2, rs1, vl); +} + +vuint16m2_t test_vrol_vv_u16m2_m(vbool8_t vm, vuint16m2_t vs2, vuint16m2_t vs1, + size_t vl) { + return __riscv_vrol(vm, vs2, vs1, vl); +} + +vuint16m2_t test_vrol_vx_u16m2_m(vbool8_t vm, vuint16m2_t vs2, size_t rs1, + size_t vl) { + return __riscv_vrol(vm, vs2, rs1, vl); +} + +vuint16m4_t test_vrol_vv_u16m4_m(vbool4_t vm, vuint16m4_t vs2, vuint16m4_t vs1, + size_t vl) { + return __riscv_vrol(vm, vs2, vs1, vl); +} + +vuint16m4_t test_vrol_vx_u16m4_m(vbool4_t vm, vuint16m4_t vs2, size_t rs1, + size_t vl) { + return __riscv_vrol(vm, vs2, rs1, vl); +} + +vuint16m8_t test_vrol_vv_u16m8_m(vbool2_t vm, vuint16m8_t vs2, vuint16m8_t vs1, + size_t vl) { + return __riscv_vrol(vm, vs2, vs1, vl); +} + +vuint16m8_t test_vrol_vx_u16m8_m(vbool2_t vm, vuint16m8_t vs2, size_t rs1, + size_t vl) { + return __riscv_vrol(vm, vs2, rs1, vl); +} + +vuint32mf2_t test_vrol_vv_u32mf2_m(vbool64_t vm, vuint32mf2_t vs2, + vuint32mf2_t vs1, size_t vl) { + return __riscv_vrol(vm, vs2, vs1, vl); +} + +vuint32mf2_t test_vrol_vx_u32mf2_m(vbool64_t vm, vuint32mf2_t vs2, size_t rs1, + size_t vl) { + return __riscv_vrol(vm, vs2, rs1, vl); +} + +vuint32m1_t test_vrol_vv_u32m1_m(vbool32_t vm, vuint32m1_t vs2, vuint32m1_t vs1, + size_t vl) { + return __riscv_vrol(vm, vs2, vs1, vl); +} + +vuint32m1_t test_vrol_vx_u32m1_m(vbool32_t vm, vuint32m1_t vs2, size_t rs1, + size_t vl) { + return __riscv_vrol(vm, vs2, rs1, vl); +} + +vuint32m2_t test_vrol_vv_u32m2_m(vbool16_t vm, vuint32m2_t vs2, vuint32m2_t vs1, + size_t vl) { + return 
__riscv_vrol(vm, vs2, vs1, vl); +} + +vuint32m2_t test_vrol_vx_u32m2_m(vbool16_t vm, vuint32m2_t vs2, size_t rs1, + size_t vl) { + return __riscv_vrol(vm, vs2, rs1, vl); +} + +vuint32m4_t test_vrol_vv_u32m4_m(vbool8_t vm, vuint32m4_t vs2, vuint32m4_t vs1, + size_t vl) { + return __riscv_vrol(vm, vs2, vs1, vl); +} + +vuint32m4_t test_vrol_vx_u32m4_m(vbool8_t vm, vuint32m4_t vs2, size_t rs1, + size_t vl) { + return __riscv_vrol(vm, vs2, rs1, vl); +} + +vuint32m8_t test_vrol_vv_u32m8_m(vbool4_t vm, vuint32m8_t vs2, vuint32m8_t vs1, + size_t vl) { + return __riscv_vrol(vm, vs2, vs1, vl); +} + +vuint32m8_t test_vrol_vx_u32m8_m(vbool4_t vm, vuint32m8_t vs2, size_t rs1, + size_t vl) { + return __riscv_vrol(vm, vs2, rs1, vl); +} + +vuint64m1_t test_vrol_vv_u64m1_m(vbool64_t vm, vuint64m1_t vs2, vuint64m1_t vs1, + size_t vl) { + return __riscv_vrol(vm, vs2, vs1, vl); +} + +vuint64m1_t test_vrol_vx_u64m1_m(vbool64_t vm, vuint64m1_t vs2, size_t rs1, + size_t vl) { + return __riscv_vrol(vm, vs2, rs1, vl); +} + +vuint64m2_t test_vrol_vv_u64m2_m(vbool32_t vm, vuint64m2_t vs2, vuint64m2_t vs1, + size_t vl) { + return __riscv_vrol(vm, vs2, vs1, vl); +} + +vuint64m2_t test_vrol_vx_u64m2_m(vbool32_t vm, vuint64m2_t vs2, size_t rs1, + size_t vl) { + return __riscv_vrol(vm, vs2, rs1, vl); +} + +vuint64m4_t test_vrol_vv_u64m4_m(vbool16_t vm, vuint64m4_t vs2, vuint64m4_t vs1, + size_t vl) { + return __riscv_vrol(vm, vs2, vs1, vl); +} + +vuint64m4_t test_vrol_vx_u64m4_m(vbool16_t vm, vuint64m4_t vs2, size_t rs1, + size_t vl) { + return __riscv_vrol(vm, vs2, rs1, vl); +} + +vuint64m8_t test_vrol_vv_u64m8_m(vbool8_t vm, vuint64m8_t vs2, vuint64m8_t vs1, + size_t vl) { + return __riscv_vrol(vm, vs2, vs1, vl); +} + +vuint64m8_t test_vrol_vx_u64m8_m(vbool8_t vm, vuint64m8_t vs2, size_t rs1, + size_t vl) { + return __riscv_vrol(vm, vs2, rs1, vl); +} diff --git a/auto-generated/vector-crypto/llvm-overloaded-tests/vror.c b/auto-generated/vector-crypto/llvm-overloaded-tests/vror.c new file mode 100644 index 000000000..6fdd3e527 --- /dev/null +++ b/auto-generated/vector-crypto/llvm-overloaded-tests/vror.c @@ -0,0 +1,412 @@ +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zvl512b \ +// RUN: -target-feature +zvbb \ +// RUN: -target-feature +zvbc \ +// RUN: -target-feature +zvkg \ +// RUN: -target-feature +zvkned \ +// RUN: -target-feature +zvknhb \ +// RUN: -target-feature +zvksed \ +// RUN: -target-feature +zvksh -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include + +vuint8mf8_t test_vror_vv_u8mf8(vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { + return __riscv_vror(vs2, vs1, vl); +} + +vuint8mf8_t test_vror_vx_u8mf8(vuint8mf8_t vs2, size_t rs1, size_t vl) { + return __riscv_vror(vs2, rs1, vl); +} + +vuint8mf4_t test_vror_vv_u8mf4(vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { + return __riscv_vror(vs2, vs1, vl); +} + +vuint8mf4_t test_vror_vx_u8mf4(vuint8mf4_t vs2, size_t rs1, size_t vl) { + return __riscv_vror(vs2, rs1, vl); +} + +vuint8mf2_t test_vror_vv_u8mf2(vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { + return __riscv_vror(vs2, vs1, vl); +} + +vuint8mf2_t test_vror_vx_u8mf2(vuint8mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vror(vs2, rs1, vl); +} + +vuint8m1_t test_vror_vv_u8m1(vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vror(vs2, vs1, vl); +} + +vuint8m1_t test_vror_vx_u8m1(vuint8m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vror(vs2, 
rs1, vl); +} + +vuint8m2_t test_vror_vv_u8m2(vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) { + return __riscv_vror(vs2, vs1, vl); +} + +vuint8m2_t test_vror_vx_u8m2(vuint8m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vror(vs2, rs1, vl); +} + +vuint8m4_t test_vror_vv_u8m4(vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) { + return __riscv_vror(vs2, vs1, vl); +} + +vuint8m4_t test_vror_vx_u8m4(vuint8m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vror(vs2, rs1, vl); +} + +vuint8m8_t test_vror_vv_u8m8(vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) { + return __riscv_vror(vs2, vs1, vl); +} + +vuint8m8_t test_vror_vx_u8m8(vuint8m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vror(vs2, rs1, vl); +} + +vuint16mf4_t test_vror_vv_u16mf4(vuint16mf4_t vs2, vuint16mf4_t vs1, + size_t vl) { + return __riscv_vror(vs2, vs1, vl); +} + +vuint16mf4_t test_vror_vx_u16mf4(vuint16mf4_t vs2, size_t rs1, size_t vl) { + return __riscv_vror(vs2, rs1, vl); +} + +vuint16mf2_t test_vror_vv_u16mf2(vuint16mf2_t vs2, vuint16mf2_t vs1, + size_t vl) { + return __riscv_vror(vs2, vs1, vl); +} + +vuint16mf2_t test_vror_vx_u16mf2(vuint16mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vror(vs2, rs1, vl); +} + +vuint16m1_t test_vror_vv_u16m1(vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vror(vs2, vs1, vl); +} + +vuint16m1_t test_vror_vx_u16m1(vuint16m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vror(vs2, rs1, vl); +} + +vuint16m2_t test_vror_vv_u16m2(vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { + return __riscv_vror(vs2, vs1, vl); +} + +vuint16m2_t test_vror_vx_u16m2(vuint16m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vror(vs2, rs1, vl); +} + +vuint16m4_t test_vror_vv_u16m4(vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { + return __riscv_vror(vs2, vs1, vl); +} + +vuint16m4_t test_vror_vx_u16m4(vuint16m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vror(vs2, rs1, vl); +} + +vuint16m8_t test_vror_vv_u16m8(vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) { + return __riscv_vror(vs2, vs1, vl); +} + +vuint16m8_t test_vror_vx_u16m8(vuint16m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vror(vs2, rs1, vl); +} + +vuint32mf2_t test_vror_vv_u32mf2(vuint32mf2_t vs2, vuint32mf2_t vs1, + size_t vl) { + return __riscv_vror(vs2, vs1, vl); +} + +vuint32mf2_t test_vror_vx_u32mf2(vuint32mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vror(vs2, rs1, vl); +} + +vuint32m1_t test_vror_vv_u32m1(vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vror(vs2, vs1, vl); +} + +vuint32m1_t test_vror_vx_u32m1(vuint32m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vror(vs2, rs1, vl); +} + +vuint32m2_t test_vror_vv_u32m2(vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { + return __riscv_vror(vs2, vs1, vl); +} + +vuint32m2_t test_vror_vx_u32m2(vuint32m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vror(vs2, rs1, vl); +} + +vuint32m4_t test_vror_vv_u32m4(vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { + return __riscv_vror(vs2, vs1, vl); +} + +vuint32m4_t test_vror_vx_u32m4(vuint32m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vror(vs2, rs1, vl); +} + +vuint32m8_t test_vror_vv_u32m8(vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) { + return __riscv_vror(vs2, vs1, vl); +} + +vuint32m8_t test_vror_vx_u32m8(vuint32m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vror(vs2, rs1, vl); +} + +vuint64m1_t test_vror_vv_u64m1(vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vror(vs2, vs1, vl); +} + +vuint64m1_t test_vror_vx_u64m1(vuint64m1_t vs2, size_t rs1, size_t vl) { + return 
__riscv_vror(vs2, rs1, vl); +} + +vuint64m2_t test_vror_vv_u64m2(vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { + return __riscv_vror(vs2, vs1, vl); +} + +vuint64m2_t test_vror_vx_u64m2(vuint64m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vror(vs2, rs1, vl); +} + +vuint64m4_t test_vror_vv_u64m4(vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { + return __riscv_vror(vs2, vs1, vl); +} + +vuint64m4_t test_vror_vx_u64m4(vuint64m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vror(vs2, rs1, vl); +} + +vuint64m8_t test_vror_vv_u64m8(vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { + return __riscv_vror(vs2, vs1, vl); +} + +vuint64m8_t test_vror_vx_u64m8(vuint64m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vror(vs2, rs1, vl); +} + +vuint8mf8_t test_vror_vv_u8mf8_m(vbool64_t vm, vuint8mf8_t vs2, vuint8mf8_t vs1, + size_t vl) { + return __riscv_vror(vm, vs2, vs1, vl); +} + +vuint8mf8_t test_vror_vx_u8mf8_m(vbool64_t vm, vuint8mf8_t vs2, size_t rs1, + size_t vl) { + return __riscv_vror(vm, vs2, rs1, vl); +} + +vuint8mf4_t test_vror_vv_u8mf4_m(vbool32_t vm, vuint8mf4_t vs2, vuint8mf4_t vs1, + size_t vl) { + return __riscv_vror(vm, vs2, vs1, vl); +} + +vuint8mf4_t test_vror_vx_u8mf4_m(vbool32_t vm, vuint8mf4_t vs2, size_t rs1, + size_t vl) { + return __riscv_vror(vm, vs2, rs1, vl); +} + +vuint8mf2_t test_vror_vv_u8mf2_m(vbool16_t vm, vuint8mf2_t vs2, vuint8mf2_t vs1, + size_t vl) { + return __riscv_vror(vm, vs2, vs1, vl); +} + +vuint8mf2_t test_vror_vx_u8mf2_m(vbool16_t vm, vuint8mf2_t vs2, size_t rs1, + size_t vl) { + return __riscv_vror(vm, vs2, rs1, vl); +} + +vuint8m1_t test_vror_vv_u8m1_m(vbool8_t vm, vuint8m1_t vs2, vuint8m1_t vs1, + size_t vl) { + return __riscv_vror(vm, vs2, vs1, vl); +} + +vuint8m1_t test_vror_vx_u8m1_m(vbool8_t vm, vuint8m1_t vs2, size_t rs1, + size_t vl) { + return __riscv_vror(vm, vs2, rs1, vl); +} + +vuint8m2_t test_vror_vv_u8m2_m(vbool4_t vm, vuint8m2_t vs2, vuint8m2_t vs1, + size_t vl) { + return __riscv_vror(vm, vs2, vs1, vl); +} + +vuint8m2_t test_vror_vx_u8m2_m(vbool4_t vm, vuint8m2_t vs2, size_t rs1, + size_t vl) { + return __riscv_vror(vm, vs2, rs1, vl); +} + +vuint8m4_t test_vror_vv_u8m4_m(vbool2_t vm, vuint8m4_t vs2, vuint8m4_t vs1, + size_t vl) { + return __riscv_vror(vm, vs2, vs1, vl); +} + +vuint8m4_t test_vror_vx_u8m4_m(vbool2_t vm, vuint8m4_t vs2, size_t rs1, + size_t vl) { + return __riscv_vror(vm, vs2, rs1, vl); +} + +vuint8m8_t test_vror_vv_u8m8_m(vbool1_t vm, vuint8m8_t vs2, vuint8m8_t vs1, + size_t vl) { + return __riscv_vror(vm, vs2, vs1, vl); +} + +vuint8m8_t test_vror_vx_u8m8_m(vbool1_t vm, vuint8m8_t vs2, size_t rs1, + size_t vl) { + return __riscv_vror(vm, vs2, rs1, vl); +} + +vuint16mf4_t test_vror_vv_u16mf4_m(vbool64_t vm, vuint16mf4_t vs2, + vuint16mf4_t vs1, size_t vl) { + return __riscv_vror(vm, vs2, vs1, vl); +} + +vuint16mf4_t test_vror_vx_u16mf4_m(vbool64_t vm, vuint16mf4_t vs2, size_t rs1, + size_t vl) { + return __riscv_vror(vm, vs2, rs1, vl); +} + +vuint16mf2_t test_vror_vv_u16mf2_m(vbool32_t vm, vuint16mf2_t vs2, + vuint16mf2_t vs1, size_t vl) { + return __riscv_vror(vm, vs2, vs1, vl); +} + +vuint16mf2_t test_vror_vx_u16mf2_m(vbool32_t vm, vuint16mf2_t vs2, size_t rs1, + size_t vl) { + return __riscv_vror(vm, vs2, rs1, vl); +} + +vuint16m1_t test_vror_vv_u16m1_m(vbool16_t vm, vuint16m1_t vs2, vuint16m1_t vs1, + size_t vl) { + return __riscv_vror(vm, vs2, vs1, vl); +} + +vuint16m1_t test_vror_vx_u16m1_m(vbool16_t vm, vuint16m1_t vs2, size_t rs1, + size_t vl) { + return __riscv_vror(vm, vs2, rs1, vl); +} + +vuint16m2_t 
test_vror_vv_u16m2_m(vbool8_t vm, vuint16m2_t vs2, vuint16m2_t vs1, + size_t vl) { + return __riscv_vror(vm, vs2, vs1, vl); +} + +vuint16m2_t test_vror_vx_u16m2_m(vbool8_t vm, vuint16m2_t vs2, size_t rs1, + size_t vl) { + return __riscv_vror(vm, vs2, rs1, vl); +} + +vuint16m4_t test_vror_vv_u16m4_m(vbool4_t vm, vuint16m4_t vs2, vuint16m4_t vs1, + size_t vl) { + return __riscv_vror(vm, vs2, vs1, vl); +} + +vuint16m4_t test_vror_vx_u16m4_m(vbool4_t vm, vuint16m4_t vs2, size_t rs1, + size_t vl) { + return __riscv_vror(vm, vs2, rs1, vl); +} + +vuint16m8_t test_vror_vv_u16m8_m(vbool2_t vm, vuint16m8_t vs2, vuint16m8_t vs1, + size_t vl) { + return __riscv_vror(vm, vs2, vs1, vl); +} + +vuint16m8_t test_vror_vx_u16m8_m(vbool2_t vm, vuint16m8_t vs2, size_t rs1, + size_t vl) { + return __riscv_vror(vm, vs2, rs1, vl); +} + +vuint32mf2_t test_vror_vv_u32mf2_m(vbool64_t vm, vuint32mf2_t vs2, + vuint32mf2_t vs1, size_t vl) { + return __riscv_vror(vm, vs2, vs1, vl); +} + +vuint32mf2_t test_vror_vx_u32mf2_m(vbool64_t vm, vuint32mf2_t vs2, size_t rs1, + size_t vl) { + return __riscv_vror(vm, vs2, rs1, vl); +} + +vuint32m1_t test_vror_vv_u32m1_m(vbool32_t vm, vuint32m1_t vs2, vuint32m1_t vs1, + size_t vl) { + return __riscv_vror(vm, vs2, vs1, vl); +} + +vuint32m1_t test_vror_vx_u32m1_m(vbool32_t vm, vuint32m1_t vs2, size_t rs1, + size_t vl) { + return __riscv_vror(vm, vs2, rs1, vl); +} + +vuint32m2_t test_vror_vv_u32m2_m(vbool16_t vm, vuint32m2_t vs2, vuint32m2_t vs1, + size_t vl) { + return __riscv_vror(vm, vs2, vs1, vl); +} + +vuint32m2_t test_vror_vx_u32m2_m(vbool16_t vm, vuint32m2_t vs2, size_t rs1, + size_t vl) { + return __riscv_vror(vm, vs2, rs1, vl); +} + +vuint32m4_t test_vror_vv_u32m4_m(vbool8_t vm, vuint32m4_t vs2, vuint32m4_t vs1, + size_t vl) { + return __riscv_vror(vm, vs2, vs1, vl); +} + +vuint32m4_t test_vror_vx_u32m4_m(vbool8_t vm, vuint32m4_t vs2, size_t rs1, + size_t vl) { + return __riscv_vror(vm, vs2, rs1, vl); +} + +vuint32m8_t test_vror_vv_u32m8_m(vbool4_t vm, vuint32m8_t vs2, vuint32m8_t vs1, + size_t vl) { + return __riscv_vror(vm, vs2, vs1, vl); +} + +vuint32m8_t test_vror_vx_u32m8_m(vbool4_t vm, vuint32m8_t vs2, size_t rs1, + size_t vl) { + return __riscv_vror(vm, vs2, rs1, vl); +} + +vuint64m1_t test_vror_vv_u64m1_m(vbool64_t vm, vuint64m1_t vs2, vuint64m1_t vs1, + size_t vl) { + return __riscv_vror(vm, vs2, vs1, vl); +} + +vuint64m1_t test_vror_vx_u64m1_m(vbool64_t vm, vuint64m1_t vs2, size_t rs1, + size_t vl) { + return __riscv_vror(vm, vs2, rs1, vl); +} + +vuint64m2_t test_vror_vv_u64m2_m(vbool32_t vm, vuint64m2_t vs2, vuint64m2_t vs1, + size_t vl) { + return __riscv_vror(vm, vs2, vs1, vl); +} + +vuint64m2_t test_vror_vx_u64m2_m(vbool32_t vm, vuint64m2_t vs2, size_t rs1, + size_t vl) { + return __riscv_vror(vm, vs2, rs1, vl); +} + +vuint64m4_t test_vror_vv_u64m4_m(vbool16_t vm, vuint64m4_t vs2, vuint64m4_t vs1, + size_t vl) { + return __riscv_vror(vm, vs2, vs1, vl); +} + +vuint64m4_t test_vror_vx_u64m4_m(vbool16_t vm, vuint64m4_t vs2, size_t rs1, + size_t vl) { + return __riscv_vror(vm, vs2, rs1, vl); +} + +vuint64m8_t test_vror_vv_u64m8_m(vbool8_t vm, vuint64m8_t vs2, vuint64m8_t vs1, + size_t vl) { + return __riscv_vror(vm, vs2, vs1, vl); +} + +vuint64m8_t test_vror_vx_u64m8_m(vbool8_t vm, vuint64m8_t vs2, size_t rs1, + size_t vl) { + return __riscv_vror(vm, vs2, rs1, vl); +} diff --git a/auto-generated/vector-crypto/llvm-overloaded-tests/vsha2ch.c b/auto-generated/vector-crypto/llvm-overloaded-tests/vsha2ch.c new file mode 100644 index 000000000..2924cdc47 --- /dev/null +++ 
b/auto-generated/vector-crypto/llvm-overloaded-tests/vsha2ch.c @@ -0,0 +1,58 @@ +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zvl512b \ +// RUN: -target-feature +zvbb \ +// RUN: -target-feature +zvbc \ +// RUN: -target-feature +zvkg \ +// RUN: -target-feature +zvkned \ +// RUN: -target-feature +zvknhb \ +// RUN: -target-feature +zvksed \ +// RUN: -target-feature +zvksh -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include + +vuint32mf2_t test_vsha2ch_vv_u32mf2(vuint32mf2_t vd, vuint32mf2_t vs2, + vuint32mf2_t vs1, size_t vl) { + return __riscv_vsha2ch(vd, vs2, vs1, vl); +} + +vuint32m1_t test_vsha2ch_vv_u32m1(vuint32m1_t vd, vuint32m1_t vs2, + vuint32m1_t vs1, size_t vl) { + return __riscv_vsha2ch(vd, vs2, vs1, vl); +} + +vuint32m2_t test_vsha2ch_vv_u32m2(vuint32m2_t vd, vuint32m2_t vs2, + vuint32m2_t vs1, size_t vl) { + return __riscv_vsha2ch(vd, vs2, vs1, vl); +} + +vuint32m4_t test_vsha2ch_vv_u32m4(vuint32m4_t vd, vuint32m4_t vs2, + vuint32m4_t vs1, size_t vl) { + return __riscv_vsha2ch(vd, vs2, vs1, vl); +} + +vuint32m8_t test_vsha2ch_vv_u32m8(vuint32m8_t vd, vuint32m8_t vs2, + vuint32m8_t vs1, size_t vl) { + return __riscv_vsha2ch(vd, vs2, vs1, vl); +} + +vuint64m1_t test_vsha2ch_vv_u64m1(vuint64m1_t vd, vuint64m1_t vs2, + vuint64m1_t vs1, size_t vl) { + return __riscv_vsha2ch(vd, vs2, vs1, vl); +} + +vuint64m2_t test_vsha2ch_vv_u64m2(vuint64m2_t vd, vuint64m2_t vs2, + vuint64m2_t vs1, size_t vl) { + return __riscv_vsha2ch(vd, vs2, vs1, vl); +} + +vuint64m4_t test_vsha2ch_vv_u64m4(vuint64m4_t vd, vuint64m4_t vs2, + vuint64m4_t vs1, size_t vl) { + return __riscv_vsha2ch(vd, vs2, vs1, vl); +} + +vuint64m8_t test_vsha2ch_vv_u64m8(vuint64m8_t vd, vuint64m8_t vs2, + vuint64m8_t vs1, size_t vl) { + return __riscv_vsha2ch(vd, vs2, vs1, vl); +} diff --git a/auto-generated/vector-crypto/llvm-overloaded-tests/vsha2cl.c b/auto-generated/vector-crypto/llvm-overloaded-tests/vsha2cl.c new file mode 100644 index 000000000..b2078e33d --- /dev/null +++ b/auto-generated/vector-crypto/llvm-overloaded-tests/vsha2cl.c @@ -0,0 +1,58 @@ +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zvl512b \ +// RUN: -target-feature +zvbb \ +// RUN: -target-feature +zvbc \ +// RUN: -target-feature +zvkg \ +// RUN: -target-feature +zvkned \ +// RUN: -target-feature +zvknhb \ +// RUN: -target-feature +zvksed \ +// RUN: -target-feature +zvksh -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include + +vuint32mf2_t test_vsha2cl_vv_u32mf2(vuint32mf2_t vd, vuint32mf2_t vs2, + vuint32mf2_t vs1, size_t vl) { + return __riscv_vsha2cl(vd, vs2, vs1, vl); +} + +vuint32m1_t test_vsha2cl_vv_u32m1(vuint32m1_t vd, vuint32m1_t vs2, + vuint32m1_t vs1, size_t vl) { + return __riscv_vsha2cl(vd, vs2, vs1, vl); +} + +vuint32m2_t test_vsha2cl_vv_u32m2(vuint32m2_t vd, vuint32m2_t vs2, + vuint32m2_t vs1, size_t vl) { + return __riscv_vsha2cl(vd, vs2, vs1, vl); +} + +vuint32m4_t test_vsha2cl_vv_u32m4(vuint32m4_t vd, vuint32m4_t vs2, + vuint32m4_t vs1, size_t vl) { + return __riscv_vsha2cl(vd, vs2, vs1, vl); +} + +vuint32m8_t test_vsha2cl_vv_u32m8(vuint32m8_t vd, vuint32m8_t vs2, + vuint32m8_t vs1, size_t vl) { + return __riscv_vsha2cl(vd, vs2, vs1, vl); +} + +vuint64m1_t test_vsha2cl_vv_u64m1(vuint64m1_t vd, vuint64m1_t vs2, + vuint64m1_t vs1, size_t vl) { + 
return __riscv_vsha2cl(vd, vs2, vs1, vl); +} + +vuint64m2_t test_vsha2cl_vv_u64m2(vuint64m2_t vd, vuint64m2_t vs2, + vuint64m2_t vs1, size_t vl) { + return __riscv_vsha2cl(vd, vs2, vs1, vl); +} + +vuint64m4_t test_vsha2cl_vv_u64m4(vuint64m4_t vd, vuint64m4_t vs2, + vuint64m4_t vs1, size_t vl) { + return __riscv_vsha2cl(vd, vs2, vs1, vl); +} + +vuint64m8_t test_vsha2cl_vv_u64m8(vuint64m8_t vd, vuint64m8_t vs2, + vuint64m8_t vs1, size_t vl) { + return __riscv_vsha2cl(vd, vs2, vs1, vl); +} diff --git a/auto-generated/vector-crypto/llvm-overloaded-tests/vsha2ms.c b/auto-generated/vector-crypto/llvm-overloaded-tests/vsha2ms.c new file mode 100644 index 000000000..e1afaede7 --- /dev/null +++ b/auto-generated/vector-crypto/llvm-overloaded-tests/vsha2ms.c @@ -0,0 +1,58 @@ +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zvl512b \ +// RUN: -target-feature +zvbb \ +// RUN: -target-feature +zvbc \ +// RUN: -target-feature +zvkg \ +// RUN: -target-feature +zvkned \ +// RUN: -target-feature +zvknhb \ +// RUN: -target-feature +zvksed \ +// RUN: -target-feature +zvksh -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include + +vuint32mf2_t test_vsha2ms_vv_u32mf2(vuint32mf2_t vd, vuint32mf2_t vs2, + vuint32mf2_t vs1, size_t vl) { + return __riscv_vsha2ms(vd, vs2, vs1, vl); +} + +vuint32m1_t test_vsha2ms_vv_u32m1(vuint32m1_t vd, vuint32m1_t vs2, + vuint32m1_t vs1, size_t vl) { + return __riscv_vsha2ms(vd, vs2, vs1, vl); +} + +vuint32m2_t test_vsha2ms_vv_u32m2(vuint32m2_t vd, vuint32m2_t vs2, + vuint32m2_t vs1, size_t vl) { + return __riscv_vsha2ms(vd, vs2, vs1, vl); +} + +vuint32m4_t test_vsha2ms_vv_u32m4(vuint32m4_t vd, vuint32m4_t vs2, + vuint32m4_t vs1, size_t vl) { + return __riscv_vsha2ms(vd, vs2, vs1, vl); +} + +vuint32m8_t test_vsha2ms_vv_u32m8(vuint32m8_t vd, vuint32m8_t vs2, + vuint32m8_t vs1, size_t vl) { + return __riscv_vsha2ms(vd, vs2, vs1, vl); +} + +vuint64m1_t test_vsha2ms_vv_u64m1(vuint64m1_t vd, vuint64m1_t vs2, + vuint64m1_t vs1, size_t vl) { + return __riscv_vsha2ms(vd, vs2, vs1, vl); +} + +vuint64m2_t test_vsha2ms_vv_u64m2(vuint64m2_t vd, vuint64m2_t vs2, + vuint64m2_t vs1, size_t vl) { + return __riscv_vsha2ms(vd, vs2, vs1, vl); +} + +vuint64m4_t test_vsha2ms_vv_u64m4(vuint64m4_t vd, vuint64m4_t vs2, + vuint64m4_t vs1, size_t vl) { + return __riscv_vsha2ms(vd, vs2, vs1, vl); +} + +vuint64m8_t test_vsha2ms_vv_u64m8(vuint64m8_t vd, vuint64m8_t vs2, + vuint64m8_t vs1, size_t vl) { + return __riscv_vsha2ms(vd, vs2, vs1, vl); +} diff --git a/auto-generated/vector-crypto/llvm-overloaded-tests/vsm3c.c b/auto-generated/vector-crypto/llvm-overloaded-tests/vsm3c.c new file mode 100644 index 000000000..3d23e0142 --- /dev/null +++ b/auto-generated/vector-crypto/llvm-overloaded-tests/vsm3c.c @@ -0,0 +1,34 @@ +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zvl512b \ +// RUN: -target-feature +zvbb \ +// RUN: -target-feature +zvbc \ +// RUN: -target-feature +zvkg \ +// RUN: -target-feature +zvkned \ +// RUN: -target-feature +zvknhb \ +// RUN: -target-feature +zvksed \ +// RUN: -target-feature +zvksh -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include + +vuint32mf2_t test_vsm3c_vi_u32mf2(vuint32mf2_t vd, vuint32mf2_t vs2, + size_t vl) { + return __riscv_vsm3c(vd, vs2, 0, vl); +} + +vuint32m1_t 
test_vsm3c_vi_u32m1(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vsm3c(vd, vs2, 0, vl); +} + +vuint32m2_t test_vsm3c_vi_u32m2(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vsm3c(vd, vs2, 0, vl); +} + +vuint32m4_t test_vsm3c_vi_u32m4(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vsm3c(vd, vs2, 0, vl); +} + +vuint32m8_t test_vsm3c_vi_u32m8(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { + return __riscv_vsm3c(vd, vs2, 0, vl); +} diff --git a/auto-generated/vector-crypto/llvm-overloaded-tests/vsm3me.c b/auto-generated/vector-crypto/llvm-overloaded-tests/vsm3me.c new file mode 100644 index 000000000..86f271de7 --- /dev/null +++ b/auto-generated/vector-crypto/llvm-overloaded-tests/vsm3me.c @@ -0,0 +1,34 @@ +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zvl512b \ +// RUN: -target-feature +zvbb \ +// RUN: -target-feature +zvbc \ +// RUN: -target-feature +zvkg \ +// RUN: -target-feature +zvkned \ +// RUN: -target-feature +zvknhb \ +// RUN: -target-feature +zvksed \ +// RUN: -target-feature +zvksh -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include + +vuint32mf2_t test_vsm3me_vv_u32mf2(vuint32mf2_t vs2, vuint32mf2_t vs1, + size_t vl) { + return __riscv_vsm3me(vs2, vs1, vl); +} + +vuint32m1_t test_vsm3me_vv_u32m1(vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vsm3me(vs2, vs1, vl); +} + +vuint32m2_t test_vsm3me_vv_u32m2(vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { + return __riscv_vsm3me(vs2, vs1, vl); +} + +vuint32m4_t test_vsm3me_vv_u32m4(vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { + return __riscv_vsm3me(vs2, vs1, vl); +} + +vuint32m8_t test_vsm3me_vv_u32m8(vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) { + return __riscv_vsm3me(vs2, vs1, vl); +} diff --git a/auto-generated/vector-crypto/llvm-overloaded-tests/vsm4k.c b/auto-generated/vector-crypto/llvm-overloaded-tests/vsm4k.c new file mode 100644 index 000000000..248207cfc --- /dev/null +++ b/auto-generated/vector-crypto/llvm-overloaded-tests/vsm4k.c @@ -0,0 +1,33 @@ +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zvl512b \ +// RUN: -target-feature +zvbb \ +// RUN: -target-feature +zvbc \ +// RUN: -target-feature +zvkg \ +// RUN: -target-feature +zvkned \ +// RUN: -target-feature +zvknhb \ +// RUN: -target-feature +zvksed \ +// RUN: -target-feature +zvksh -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include + +vuint32mf2_t test_vsm4k_vi_u32mf2(vuint32mf2_t vs2, size_t vl) { + return __riscv_vsm4k(vs2, 0, vl); +} + +vuint32m1_t test_vsm4k_vi_u32m1(vuint32m1_t vs2, size_t vl) { + return __riscv_vsm4k(vs2, 0, vl); +} + +vuint32m2_t test_vsm4k_vi_u32m2(vuint32m2_t vs2, size_t vl) { + return __riscv_vsm4k(vs2, 0, vl); +} + +vuint32m4_t test_vsm4k_vi_u32m4(vuint32m4_t vs2, size_t vl) { + return __riscv_vsm4k(vs2, 0, vl); +} + +vuint32m8_t test_vsm4k_vi_u32m8(vuint32m8_t vs2, size_t vl) { + return __riscv_vsm4k(vs2, 0, vl); +} diff --git a/auto-generated/vector-crypto/llvm-overloaded-tests/vsm4r.c b/auto-generated/vector-crypto/llvm-overloaded-tests/vsm4r.c new file mode 100644 index 000000000..6cb46317c --- /dev/null +++ b/auto-generated/vector-crypto/llvm-overloaded-tests/vsm4r.c @@ -0,0 +1,104 @@ +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v 
-target-feature +zvl512b \ +// RUN: -target-feature +zvbb \ +// RUN: -target-feature +zvbc \ +// RUN: -target-feature +zvkg \ +// RUN: -target-feature +zvkned \ +// RUN: -target-feature +zvknhb \ +// RUN: -target-feature +zvksed \ +// RUN: -target-feature +zvksh -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include + +vuint32mf2_t test_vsm4r_vv_u32mf2(vuint32mf2_t vd, vuint32mf2_t vs2, + size_t vl) { + return __riscv_vsm4r_vv(vd, vs2, vl); +} + +vuint32mf2_t test_vsm4r_vs_u32mf2_u32mf2(vuint32mf2_t vd, vuint32mf2_t vs2, + size_t vl) { + return __riscv_vsm4r_vs(vd, vs2, vl); +} + +vuint32m1_t test_vsm4r_vs_u32mf2_u32m1(vuint32m1_t vd, vuint32mf2_t vs2, + size_t vl) { + return __riscv_vsm4r_vs(vd, vs2, vl); +} + +vuint32m2_t test_vsm4r_vs_u32mf2_u32m2(vuint32m2_t vd, vuint32mf2_t vs2, + size_t vl) { + return __riscv_vsm4r_vs(vd, vs2, vl); +} + +vuint32m4_t test_vsm4r_vs_u32mf2_u32m4(vuint32m4_t vd, vuint32mf2_t vs2, + size_t vl) { + return __riscv_vsm4r_vs(vd, vs2, vl); +} + +vuint32m8_t test_vsm4r_vs_u32mf2_u32m8(vuint32m8_t vd, vuint32mf2_t vs2, + size_t vl) { + return __riscv_vsm4r_vs(vd, vs2, vl); +} + +vuint32m1_t test_vsm4r_vv_u32m1(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vsm4r_vv(vd, vs2, vl); +} + +vuint32m1_t test_vsm4r_vs_u32m1_u32m1(vuint32m1_t vd, vuint32m1_t vs2, + size_t vl) { + return __riscv_vsm4r_vs(vd, vs2, vl); +} + +vuint32m2_t test_vsm4r_vs_u32m1_u32m2(vuint32m2_t vd, vuint32m1_t vs2, + size_t vl) { + return __riscv_vsm4r_vs(vd, vs2, vl); +} + +vuint32m4_t test_vsm4r_vs_u32m1_u32m4(vuint32m4_t vd, vuint32m1_t vs2, + size_t vl) { + return __riscv_vsm4r_vs(vd, vs2, vl); +} + +vuint32m8_t test_vsm4r_vs_u32m1_u32m8(vuint32m8_t vd, vuint32m1_t vs2, + size_t vl) { + return __riscv_vsm4r_vs(vd, vs2, vl); +} + +vuint32m2_t test_vsm4r_vv_u32m2(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vsm4r_vv(vd, vs2, vl); +} + +vuint32m2_t test_vsm4r_vs_u32m2_u32m2(vuint32m2_t vd, vuint32m2_t vs2, + size_t vl) { + return __riscv_vsm4r_vs(vd, vs2, vl); +} + +vuint32m4_t test_vsm4r_vs_u32m2_u32m4(vuint32m4_t vd, vuint32m2_t vs2, + size_t vl) { + return __riscv_vsm4r_vs(vd, vs2, vl); +} + +vuint32m8_t test_vsm4r_vs_u32m2_u32m8(vuint32m8_t vd, vuint32m2_t vs2, + size_t vl) { + return __riscv_vsm4r_vs(vd, vs2, vl); +} + +vuint32m4_t test_vsm4r_vv_u32m4(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vsm4r_vv(vd, vs2, vl); +} + +vuint32m4_t test_vsm4r_vs_u32m4_u32m4(vuint32m4_t vd, vuint32m4_t vs2, + size_t vl) { + return __riscv_vsm4r_vs(vd, vs2, vl); +} + +vuint32m8_t test_vsm4r_vs_u32m4_u32m8(vuint32m8_t vd, vuint32m4_t vs2, + size_t vl) { + return __riscv_vsm4r_vs(vd, vs2, vl); +} + +vuint32m8_t test_vsm4r_vv_u32m8(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { + return __riscv_vsm4r_vv(vd, vs2, vl); +} diff --git a/auto-generated/vector-crypto/llvm-overloaded-tests/vwsll.c b/auto-generated/vector-crypto/llvm-overloaded-tests/vwsll.c new file mode 100644 index 000000000..029180986 --- /dev/null +++ b/auto-generated/vector-crypto/llvm-overloaded-tests/vwsll.c @@ -0,0 +1,284 @@ +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zvl512b \ +// RUN: -target-feature +zvbb \ +// RUN: -target-feature +zvbc \ +// RUN: -target-feature +zvkg \ +// RUN: -target-feature +zvkned \ +// RUN: -target-feature +zvknhb \ +// RUN: -target-feature +zvksed \ +// RUN: -target-feature +zvksh -disable-O0-optnone \ +// RUN: 
-emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include + +vuint16mf4_t test_vwsll_vv_u16mf4(vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { + return __riscv_vwsll(vs2, vs1, vl); +} + +vuint16mf4_t test_vwsll_vx_u16mf4(vuint8mf8_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll(vs2, rs1, vl); +} + +vuint16mf2_t test_vwsll_vv_u16mf2(vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { + return __riscv_vwsll(vs2, vs1, vl); +} + +vuint16mf2_t test_vwsll_vx_u16mf2(vuint8mf4_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll(vs2, rs1, vl); +} + +vuint16m1_t test_vwsll_vv_u16m1(vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { + return __riscv_vwsll(vs2, vs1, vl); +} + +vuint16m1_t test_vwsll_vx_u16m1(vuint8mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll(vs2, rs1, vl); +} + +vuint16m2_t test_vwsll_vv_u16m2(vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vwsll(vs2, vs1, vl); +} + +vuint16m2_t test_vwsll_vx_u16m2(vuint8m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll(vs2, rs1, vl); +} + +vuint16m4_t test_vwsll_vv_u16m4(vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) { + return __riscv_vwsll(vs2, vs1, vl); +} + +vuint16m4_t test_vwsll_vx_u16m4(vuint8m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll(vs2, rs1, vl); +} + +vuint16m8_t test_vwsll_vv_u16m8(vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) { + return __riscv_vwsll(vs2, vs1, vl); +} + +vuint16m8_t test_vwsll_vx_u16m8(vuint8m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll(vs2, rs1, vl); +} + +vuint32mf2_t test_vwsll_vv_u32mf2(vuint16mf4_t vs2, vuint16mf4_t vs1, + size_t vl) { + return __riscv_vwsll(vs2, vs1, vl); +} + +vuint32mf2_t test_vwsll_vx_u32mf2(vuint16mf4_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll(vs2, rs1, vl); +} + +vuint32m1_t test_vwsll_vv_u32m1(vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { + return __riscv_vwsll(vs2, vs1, vl); +} + +vuint32m1_t test_vwsll_vx_u32m1(vuint16mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll(vs2, rs1, vl); +} + +vuint32m2_t test_vwsll_vv_u32m2(vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vwsll(vs2, vs1, vl); +} + +vuint32m2_t test_vwsll_vx_u32m2(vuint16m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll(vs2, rs1, vl); +} + +vuint32m4_t test_vwsll_vv_u32m4(vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { + return __riscv_vwsll(vs2, vs1, vl); +} + +vuint32m4_t test_vwsll_vx_u32m4(vuint16m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll(vs2, rs1, vl); +} + +vuint32m8_t test_vwsll_vv_u32m8(vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { + return __riscv_vwsll(vs2, vs1, vl); +} + +vuint32m8_t test_vwsll_vx_u32m8(vuint16m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll(vs2, rs1, vl); +} + +vuint64m1_t test_vwsll_vv_u64m1(vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { + return __riscv_vwsll(vs2, vs1, vl); +} + +vuint64m1_t test_vwsll_vx_u64m1(vuint32mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll(vs2, rs1, vl); +} + +vuint64m2_t test_vwsll_vv_u64m2(vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vwsll(vs2, vs1, vl); +} + +vuint64m2_t test_vwsll_vx_u64m2(vuint32m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll(vs2, rs1, vl); +} + +vuint64m4_t test_vwsll_vv_u64m4(vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { + return __riscv_vwsll(vs2, vs1, vl); +} + +vuint64m4_t test_vwsll_vx_u64m4(vuint32m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll(vs2, rs1, vl); +} + +vuint64m8_t test_vwsll_vv_u64m8(vuint32m4_t vs2, 
vuint32m4_t vs1, size_t vl) { + return __riscv_vwsll(vs2, vs1, vl); +} + +vuint64m8_t test_vwsll_vx_u64m8(vuint32m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll(vs2, rs1, vl); +} + +vuint16mf4_t test_vwsll_vv_u16mf4_m(vbool64_t vm, vuint8mf8_t vs2, + vuint8mf8_t vs1, size_t vl) { + return __riscv_vwsll(vm, vs2, vs1, vl); +} + +vuint16mf4_t test_vwsll_vx_u16mf4_m(vbool64_t vm, vuint8mf8_t vs2, size_t rs1, + size_t vl) { + return __riscv_vwsll(vm, vs2, rs1, vl); +} + +vuint16mf2_t test_vwsll_vv_u16mf2_m(vbool32_t vm, vuint8mf4_t vs2, + vuint8mf4_t vs1, size_t vl) { + return __riscv_vwsll(vm, vs2, vs1, vl); +} + +vuint16mf2_t test_vwsll_vx_u16mf2_m(vbool32_t vm, vuint8mf4_t vs2, size_t rs1, + size_t vl) { + return __riscv_vwsll(vm, vs2, rs1, vl); +} + +vuint16m1_t test_vwsll_vv_u16m1_m(vbool16_t vm, vuint8mf2_t vs2, + vuint8mf2_t vs1, size_t vl) { + return __riscv_vwsll(vm, vs2, vs1, vl); +} + +vuint16m1_t test_vwsll_vx_u16m1_m(vbool16_t vm, vuint8mf2_t vs2, size_t rs1, + size_t vl) { + return __riscv_vwsll(vm, vs2, rs1, vl); +} + +vuint16m2_t test_vwsll_vv_u16m2_m(vbool8_t vm, vuint8m1_t vs2, vuint8m1_t vs1, + size_t vl) { + return __riscv_vwsll(vm, vs2, vs1, vl); +} + +vuint16m2_t test_vwsll_vx_u16m2_m(vbool8_t vm, vuint8m1_t vs2, size_t rs1, + size_t vl) { + return __riscv_vwsll(vm, vs2, rs1, vl); +} + +vuint16m4_t test_vwsll_vv_u16m4_m(vbool4_t vm, vuint8m2_t vs2, vuint8m2_t vs1, + size_t vl) { + return __riscv_vwsll(vm, vs2, vs1, vl); +} + +vuint16m4_t test_vwsll_vx_u16m4_m(vbool4_t vm, vuint8m2_t vs2, size_t rs1, + size_t vl) { + return __riscv_vwsll(vm, vs2, rs1, vl); +} + +vuint16m8_t test_vwsll_vv_u16m8_m(vbool2_t vm, vuint8m4_t vs2, vuint8m4_t vs1, + size_t vl) { + return __riscv_vwsll(vm, vs2, vs1, vl); +} + +vuint16m8_t test_vwsll_vx_u16m8_m(vbool2_t vm, vuint8m4_t vs2, size_t rs1, + size_t vl) { + return __riscv_vwsll(vm, vs2, rs1, vl); +} + +vuint32mf2_t test_vwsll_vv_u32mf2_m(vbool64_t vm, vuint16mf4_t vs2, + vuint16mf4_t vs1, size_t vl) { + return __riscv_vwsll(vm, vs2, vs1, vl); +} + +vuint32mf2_t test_vwsll_vx_u32mf2_m(vbool64_t vm, vuint16mf4_t vs2, size_t rs1, + size_t vl) { + return __riscv_vwsll(vm, vs2, rs1, vl); +} + +vuint32m1_t test_vwsll_vv_u32m1_m(vbool32_t vm, vuint16mf2_t vs2, + vuint16mf2_t vs1, size_t vl) { + return __riscv_vwsll(vm, vs2, vs1, vl); +} + +vuint32m1_t test_vwsll_vx_u32m1_m(vbool32_t vm, vuint16mf2_t vs2, size_t rs1, + size_t vl) { + return __riscv_vwsll(vm, vs2, rs1, vl); +} + +vuint32m2_t test_vwsll_vv_u32m2_m(vbool16_t vm, vuint16m1_t vs2, + vuint16m1_t vs1, size_t vl) { + return __riscv_vwsll(vm, vs2, vs1, vl); +} + +vuint32m2_t test_vwsll_vx_u32m2_m(vbool16_t vm, vuint16m1_t vs2, size_t rs1, + size_t vl) { + return __riscv_vwsll(vm, vs2, rs1, vl); +} + +vuint32m4_t test_vwsll_vv_u32m4_m(vbool8_t vm, vuint16m2_t vs2, vuint16m2_t vs1, + size_t vl) { + return __riscv_vwsll(vm, vs2, vs1, vl); +} + +vuint32m4_t test_vwsll_vx_u32m4_m(vbool8_t vm, vuint16m2_t vs2, size_t rs1, + size_t vl) { + return __riscv_vwsll(vm, vs2, rs1, vl); +} + +vuint32m8_t test_vwsll_vv_u32m8_m(vbool4_t vm, vuint16m4_t vs2, vuint16m4_t vs1, + size_t vl) { + return __riscv_vwsll(vm, vs2, vs1, vl); +} + +vuint32m8_t test_vwsll_vx_u32m8_m(vbool4_t vm, vuint16m4_t vs2, size_t rs1, + size_t vl) { + return __riscv_vwsll(vm, vs2, rs1, vl); +} + +vuint64m1_t test_vwsll_vv_u64m1_m(vbool64_t vm, vuint32mf2_t vs2, + vuint32mf2_t vs1, size_t vl) { + return __riscv_vwsll(vm, vs2, vs1, vl); +} + +vuint64m1_t test_vwsll_vx_u64m1_m(vbool64_t vm, vuint32mf2_t vs2, size_t rs1, + 
size_t vl) { + return __riscv_vwsll(vm, vs2, rs1, vl); +} + +vuint64m2_t test_vwsll_vv_u64m2_m(vbool32_t vm, vuint32m1_t vs2, + vuint32m1_t vs1, size_t vl) { + return __riscv_vwsll(vm, vs2, vs1, vl); +} + +vuint64m2_t test_vwsll_vx_u64m2_m(vbool32_t vm, vuint32m1_t vs2, size_t rs1, + size_t vl) { + return __riscv_vwsll(vm, vs2, rs1, vl); +} + +vuint64m4_t test_vwsll_vv_u64m4_m(vbool16_t vm, vuint32m2_t vs2, + vuint32m2_t vs1, size_t vl) { + return __riscv_vwsll(vm, vs2, vs1, vl); +} + +vuint64m4_t test_vwsll_vx_u64m4_m(vbool16_t vm, vuint32m2_t vs2, size_t rs1, + size_t vl) { + return __riscv_vwsll(vm, vs2, rs1, vl); +} + +vuint64m8_t test_vwsll_vv_u64m8_m(vbool8_t vm, vuint32m4_t vs2, vuint32m4_t vs1, + size_t vl) { + return __riscv_vwsll(vm, vs2, vs1, vl); +} + +vuint64m8_t test_vwsll_vx_u64m8_m(vbool8_t vm, vuint32m4_t vs2, size_t rs1, + size_t vl) { + return __riscv_vwsll(vm, vs2, rs1, vl); +} diff --git a/auto-generated/vector-crypto/overloaded-api-testing/vaesdf.c b/auto-generated/vector-crypto/overloaded-api-testing/vaesdf.c new file mode 100644 index 000000000..a240f30cd --- /dev/null +++ b/auto-generated/vector-crypto/overloaded-api-testing/vaesdf.c @@ -0,0 +1,78 @@ +#include +#include + +vuint32mf2_t test_vaesdf_vv_u32mf2(vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) { + return __riscv_vaesdf_vv(vd, vs2, vl); +} + +vuint32mf2_t test_vaesdf_vs_u32mf2_u32mf2(vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) { + return __riscv_vaesdf_vs(vd, vs2, vl); +} + +vuint32m1_t test_vaesdf_vs_u32mf2_u32m1(vuint32m1_t vd, vuint32mf2_t vs2, size_t vl) { + return __riscv_vaesdf_vs(vd, vs2, vl); +} + +vuint32m2_t test_vaesdf_vs_u32mf2_u32m2(vuint32m2_t vd, vuint32mf2_t vs2, size_t vl) { + return __riscv_vaesdf_vs(vd, vs2, vl); +} + +vuint32m4_t test_vaesdf_vs_u32mf2_u32m4(vuint32m4_t vd, vuint32mf2_t vs2, size_t vl) { + return __riscv_vaesdf_vs(vd, vs2, vl); +} + +vuint32m8_t test_vaesdf_vs_u32mf2_u32m8(vuint32m8_t vd, vuint32mf2_t vs2, size_t vl) { + return __riscv_vaesdf_vs(vd, vs2, vl); +} + +vuint32m1_t test_vaesdf_vv_u32m1(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vaesdf_vv(vd, vs2, vl); +} + +vuint32m1_t test_vaesdf_vs_u32m1_u32m1(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vaesdf_vs(vd, vs2, vl); +} + +vuint32m2_t test_vaesdf_vs_u32m1_u32m2(vuint32m2_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vaesdf_vs(vd, vs2, vl); +} + +vuint32m4_t test_vaesdf_vs_u32m1_u32m4(vuint32m4_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vaesdf_vs(vd, vs2, vl); +} + +vuint32m8_t test_vaesdf_vs_u32m1_u32m8(vuint32m8_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vaesdf_vs(vd, vs2, vl); +} + +vuint32m2_t test_vaesdf_vv_u32m2(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vaesdf_vv(vd, vs2, vl); +} + +vuint32m2_t test_vaesdf_vs_u32m2_u32m2(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vaesdf_vs(vd, vs2, vl); +} + +vuint32m4_t test_vaesdf_vs_u32m2_u32m4(vuint32m4_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vaesdf_vs(vd, vs2, vl); +} + +vuint32m8_t test_vaesdf_vs_u32m2_u32m8(vuint32m8_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vaesdf_vs(vd, vs2, vl); +} + +vuint32m4_t test_vaesdf_vv_u32m4(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vaesdf_vv(vd, vs2, vl); +} + +vuint32m4_t test_vaesdf_vs_u32m4_u32m4(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vaesdf_vs(vd, vs2, vl); +} + +vuint32m8_t test_vaesdf_vs_u32m4_u32m8(vuint32m8_t vd, vuint32m4_t vs2, size_t vl) { + return 
__riscv_vaesdf_vs(vd, vs2, vl); +} + +vuint32m8_t test_vaesdf_vv_u32m8(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { + return __riscv_vaesdf_vv(vd, vs2, vl); +} diff --git a/auto-generated/vector-crypto/overloaded-api-testing/vaesdm.c b/auto-generated/vector-crypto/overloaded-api-testing/vaesdm.c new file mode 100644 index 000000000..44e4a38fb --- /dev/null +++ b/auto-generated/vector-crypto/overloaded-api-testing/vaesdm.c @@ -0,0 +1,78 @@ +#include +#include + +vuint32mf2_t test_vaesdm_vv_u32mf2(vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) { + return __riscv_vaesdm_vv(vd, vs2, vl); +} + +vuint32mf2_t test_vaesdm_vs_u32mf2_u32mf2(vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) { + return __riscv_vaesdm_vs(vd, vs2, vl); +} + +vuint32m1_t test_vaesdm_vs_u32mf2_u32m1(vuint32m1_t vd, vuint32mf2_t vs2, size_t vl) { + return __riscv_vaesdm_vs(vd, vs2, vl); +} + +vuint32m2_t test_vaesdm_vs_u32mf2_u32m2(vuint32m2_t vd, vuint32mf2_t vs2, size_t vl) { + return __riscv_vaesdm_vs(vd, vs2, vl); +} + +vuint32m4_t test_vaesdm_vs_u32mf2_u32m4(vuint32m4_t vd, vuint32mf2_t vs2, size_t vl) { + return __riscv_vaesdm_vs(vd, vs2, vl); +} + +vuint32m8_t test_vaesdm_vs_u32mf2_u32m8(vuint32m8_t vd, vuint32mf2_t vs2, size_t vl) { + return __riscv_vaesdm_vs(vd, vs2, vl); +} + +vuint32m1_t test_vaesdm_vv_u32m1(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vaesdm_vv(vd, vs2, vl); +} + +vuint32m1_t test_vaesdm_vs_u32m1_u32m1(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vaesdm_vs(vd, vs2, vl); +} + +vuint32m2_t test_vaesdm_vs_u32m1_u32m2(vuint32m2_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vaesdm_vs(vd, vs2, vl); +} + +vuint32m4_t test_vaesdm_vs_u32m1_u32m4(vuint32m4_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vaesdm_vs(vd, vs2, vl); +} + +vuint32m8_t test_vaesdm_vs_u32m1_u32m8(vuint32m8_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vaesdm_vs(vd, vs2, vl); +} + +vuint32m2_t test_vaesdm_vv_u32m2(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vaesdm_vv(vd, vs2, vl); +} + +vuint32m2_t test_vaesdm_vs_u32m2_u32m2(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vaesdm_vs(vd, vs2, vl); +} + +vuint32m4_t test_vaesdm_vs_u32m2_u32m4(vuint32m4_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vaesdm_vs(vd, vs2, vl); +} + +vuint32m8_t test_vaesdm_vs_u32m2_u32m8(vuint32m8_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vaesdm_vs(vd, vs2, vl); +} + +vuint32m4_t test_vaesdm_vv_u32m4(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vaesdm_vv(vd, vs2, vl); +} + +vuint32m4_t test_vaesdm_vs_u32m4_u32m4(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vaesdm_vs(vd, vs2, vl); +} + +vuint32m8_t test_vaesdm_vs_u32m4_u32m8(vuint32m8_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vaesdm_vs(vd, vs2, vl); +} + +vuint32m8_t test_vaesdm_vv_u32m8(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { + return __riscv_vaesdm_vv(vd, vs2, vl); +} diff --git a/auto-generated/vector-crypto/overloaded-api-testing/vaesef.c b/auto-generated/vector-crypto/overloaded-api-testing/vaesef.c new file mode 100644 index 000000000..8a032c2f8 --- /dev/null +++ b/auto-generated/vector-crypto/overloaded-api-testing/vaesef.c @@ -0,0 +1,78 @@ +#include +#include + +vuint32mf2_t test_vaesef_vv_u32mf2(vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) { + return __riscv_vaesef_vv(vd, vs2, vl); +} + +vuint32mf2_t test_vaesef_vs_u32mf2_u32mf2(vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) { + return __riscv_vaesef_vs(vd, vs2, vl); +} + +vuint32m1_t 
test_vaesef_vs_u32mf2_u32m1(vuint32m1_t vd, vuint32mf2_t vs2, size_t vl) { + return __riscv_vaesef_vs(vd, vs2, vl); +} + +vuint32m2_t test_vaesef_vs_u32mf2_u32m2(vuint32m2_t vd, vuint32mf2_t vs2, size_t vl) { + return __riscv_vaesef_vs(vd, vs2, vl); +} + +vuint32m4_t test_vaesef_vs_u32mf2_u32m4(vuint32m4_t vd, vuint32mf2_t vs2, size_t vl) { + return __riscv_vaesef_vs(vd, vs2, vl); +} + +vuint32m8_t test_vaesef_vs_u32mf2_u32m8(vuint32m8_t vd, vuint32mf2_t vs2, size_t vl) { + return __riscv_vaesef_vs(vd, vs2, vl); +} + +vuint32m1_t test_vaesef_vv_u32m1(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vaesef_vv(vd, vs2, vl); +} + +vuint32m1_t test_vaesef_vs_u32m1_u32m1(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vaesef_vs(vd, vs2, vl); +} + +vuint32m2_t test_vaesef_vs_u32m1_u32m2(vuint32m2_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vaesef_vs(vd, vs2, vl); +} + +vuint32m4_t test_vaesef_vs_u32m1_u32m4(vuint32m4_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vaesef_vs(vd, vs2, vl); +} + +vuint32m8_t test_vaesef_vs_u32m1_u32m8(vuint32m8_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vaesef_vs(vd, vs2, vl); +} + +vuint32m2_t test_vaesef_vv_u32m2(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vaesef_vv(vd, vs2, vl); +} + +vuint32m2_t test_vaesef_vs_u32m2_u32m2(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vaesef_vs(vd, vs2, vl); +} + +vuint32m4_t test_vaesef_vs_u32m2_u32m4(vuint32m4_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vaesef_vs(vd, vs2, vl); +} + +vuint32m8_t test_vaesef_vs_u32m2_u32m8(vuint32m8_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vaesef_vs(vd, vs2, vl); +} + +vuint32m4_t test_vaesef_vv_u32m4(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vaesef_vv(vd, vs2, vl); +} + +vuint32m4_t test_vaesef_vs_u32m4_u32m4(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vaesef_vs(vd, vs2, vl); +} + +vuint32m8_t test_vaesef_vs_u32m4_u32m8(vuint32m8_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vaesef_vs(vd, vs2, vl); +} + +vuint32m8_t test_vaesef_vv_u32m8(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { + return __riscv_vaesef_vv(vd, vs2, vl); +} diff --git a/auto-generated/vector-crypto/overloaded-api-testing/vaesem.c b/auto-generated/vector-crypto/overloaded-api-testing/vaesem.c new file mode 100644 index 000000000..e6f666ea6 --- /dev/null +++ b/auto-generated/vector-crypto/overloaded-api-testing/vaesem.c @@ -0,0 +1,78 @@ +#include +#include + +vuint32mf2_t test_vaesem_vv_u32mf2(vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) { + return __riscv_vaesem_vv(vd, vs2, vl); +} + +vuint32mf2_t test_vaesem_vs_u32mf2_u32mf2(vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) { + return __riscv_vaesem_vs(vd, vs2, vl); +} + +vuint32m1_t test_vaesem_vs_u32mf2_u32m1(vuint32m1_t vd, vuint32mf2_t vs2, size_t vl) { + return __riscv_vaesem_vs(vd, vs2, vl); +} + +vuint32m2_t test_vaesem_vs_u32mf2_u32m2(vuint32m2_t vd, vuint32mf2_t vs2, size_t vl) { + return __riscv_vaesem_vs(vd, vs2, vl); +} + +vuint32m4_t test_vaesem_vs_u32mf2_u32m4(vuint32m4_t vd, vuint32mf2_t vs2, size_t vl) { + return __riscv_vaesem_vs(vd, vs2, vl); +} + +vuint32m8_t test_vaesem_vs_u32mf2_u32m8(vuint32m8_t vd, vuint32mf2_t vs2, size_t vl) { + return __riscv_vaesem_vs(vd, vs2, vl); +} + +vuint32m1_t test_vaesem_vv_u32m1(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vaesem_vv(vd, vs2, vl); +} + +vuint32m1_t test_vaesem_vs_u32m1_u32m1(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { + return 
__riscv_vaesem_vs(vd, vs2, vl); +} + +vuint32m2_t test_vaesem_vs_u32m1_u32m2(vuint32m2_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vaesem_vs(vd, vs2, vl); +} + +vuint32m4_t test_vaesem_vs_u32m1_u32m4(vuint32m4_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vaesem_vs(vd, vs2, vl); +} + +vuint32m8_t test_vaesem_vs_u32m1_u32m8(vuint32m8_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vaesem_vs(vd, vs2, vl); +} + +vuint32m2_t test_vaesem_vv_u32m2(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vaesem_vv(vd, vs2, vl); +} + +vuint32m2_t test_vaesem_vs_u32m2_u32m2(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vaesem_vs(vd, vs2, vl); +} + +vuint32m4_t test_vaesem_vs_u32m2_u32m4(vuint32m4_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vaesem_vs(vd, vs2, vl); +} + +vuint32m8_t test_vaesem_vs_u32m2_u32m8(vuint32m8_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vaesem_vs(vd, vs2, vl); +} + +vuint32m4_t test_vaesem_vv_u32m4(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vaesem_vv(vd, vs2, vl); +} + +vuint32m4_t test_vaesem_vs_u32m4_u32m4(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vaesem_vs(vd, vs2, vl); +} + +vuint32m8_t test_vaesem_vs_u32m4_u32m8(vuint32m8_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vaesem_vs(vd, vs2, vl); +} + +vuint32m8_t test_vaesem_vv_u32m8(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { + return __riscv_vaesem_vv(vd, vs2, vl); +} diff --git a/auto-generated/vector-crypto/overloaded-api-testing/vaeskf1.c b/auto-generated/vector-crypto/overloaded-api-testing/vaeskf1.c new file mode 100644 index 000000000..73358e70e --- /dev/null +++ b/auto-generated/vector-crypto/overloaded-api-testing/vaeskf1.c @@ -0,0 +1,22 @@ +#include +#include + +vuint32mf2_t test_vaeskf1_vi_u32mf2(vuint32mf2_t vs2, size_t vl) { + return __riscv_vaeskf1(vs2, 0, vl); +} + +vuint32m1_t test_vaeskf1_vi_u32m1(vuint32m1_t vs2, size_t vl) { + return __riscv_vaeskf1(vs2, 0, vl); +} + +vuint32m2_t test_vaeskf1_vi_u32m2(vuint32m2_t vs2, size_t vl) { + return __riscv_vaeskf1(vs2, 0, vl); +} + +vuint32m4_t test_vaeskf1_vi_u32m4(vuint32m4_t vs2, size_t vl) { + return __riscv_vaeskf1(vs2, 0, vl); +} + +vuint32m8_t test_vaeskf1_vi_u32m8(vuint32m8_t vs2, size_t vl) { + return __riscv_vaeskf1(vs2, 0, vl); +} diff --git a/auto-generated/vector-crypto/overloaded-api-testing/vaeskf2.c b/auto-generated/vector-crypto/overloaded-api-testing/vaeskf2.c new file mode 100644 index 000000000..a15310d57 --- /dev/null +++ b/auto-generated/vector-crypto/overloaded-api-testing/vaeskf2.c @@ -0,0 +1,22 @@ +#include +#include + +vuint32mf2_t test_vaeskf2_vi_u32mf2(vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) { + return __riscv_vaeskf2(vd, vs2, 0, vl); +} + +vuint32m1_t test_vaeskf2_vi_u32m1(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vaeskf2(vd, vs2, 0, vl); +} + +vuint32m2_t test_vaeskf2_vi_u32m2(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vaeskf2(vd, vs2, 0, vl); +} + +vuint32m4_t test_vaeskf2_vi_u32m4(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vaeskf2(vd, vs2, 0, vl); +} + +vuint32m8_t test_vaeskf2_vi_u32m8(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { + return __riscv_vaeskf2(vd, vs2, 0, vl); +} diff --git a/auto-generated/vector-crypto/overloaded-api-testing/vaesz.c b/auto-generated/vector-crypto/overloaded-api-testing/vaesz.c new file mode 100644 index 000000000..76a5d32fc --- /dev/null +++ b/auto-generated/vector-crypto/overloaded-api-testing/vaesz.c @@ -0,0 +1,58 @@ 
+#include +#include + +vuint32mf2_t test_vaesz_vs_u32mf2_u32mf2(vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) { + return __riscv_vaesz(vd, vs2, vl); +} + +vuint32m1_t test_vaesz_vs_u32mf2_u32m1(vuint32m1_t vd, vuint32mf2_t vs2, size_t vl) { + return __riscv_vaesz(vd, vs2, vl); +} + +vuint32m2_t test_vaesz_vs_u32mf2_u32m2(vuint32m2_t vd, vuint32mf2_t vs2, size_t vl) { + return __riscv_vaesz(vd, vs2, vl); +} + +vuint32m4_t test_vaesz_vs_u32mf2_u32m4(vuint32m4_t vd, vuint32mf2_t vs2, size_t vl) { + return __riscv_vaesz(vd, vs2, vl); +} + +vuint32m8_t test_vaesz_vs_u32mf2_u32m8(vuint32m8_t vd, vuint32mf2_t vs2, size_t vl) { + return __riscv_vaesz(vd, vs2, vl); +} + +vuint32m1_t test_vaesz_vs_u32m1_u32m1(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vaesz(vd, vs2, vl); +} + +vuint32m2_t test_vaesz_vs_u32m1_u32m2(vuint32m2_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vaesz(vd, vs2, vl); +} + +vuint32m4_t test_vaesz_vs_u32m1_u32m4(vuint32m4_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vaesz(vd, vs2, vl); +} + +vuint32m8_t test_vaesz_vs_u32m1_u32m8(vuint32m8_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vaesz(vd, vs2, vl); +} + +vuint32m2_t test_vaesz_vs_u32m2_u32m2(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vaesz(vd, vs2, vl); +} + +vuint32m4_t test_vaesz_vs_u32m2_u32m4(vuint32m4_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vaesz(vd, vs2, vl); +} + +vuint32m8_t test_vaesz_vs_u32m2_u32m8(vuint32m8_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vaesz(vd, vs2, vl); +} + +vuint32m4_t test_vaesz_vs_u32m4_u32m4(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vaesz(vd, vs2, vl); +} + +vuint32m8_t test_vaesz_vs_u32m4_u32m8(vuint32m8_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vaesz(vd, vs2, vl); +} diff --git a/auto-generated/vector-crypto/overloaded-api-testing/vandn.c b/auto-generated/vector-crypto/overloaded-api-testing/vandn.c new file mode 100644 index 000000000..61d7a594f --- /dev/null +++ b/auto-generated/vector-crypto/overloaded-api-testing/vandn.c @@ -0,0 +1,354 @@ +#include +#include + +vuint8mf8_t test_vandn_vv_u8mf8(vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { + return __riscv_vandn(vs2, vs1, vl); +} + +vuint8mf8_t test_vandn_vx_u8mf8(vuint8mf8_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vandn(vs2, rs1, vl); +} + +vuint8mf4_t test_vandn_vv_u8mf4(vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { + return __riscv_vandn(vs2, vs1, vl); +} + +vuint8mf4_t test_vandn_vx_u8mf4(vuint8mf4_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vandn(vs2, rs1, vl); +} + +vuint8mf2_t test_vandn_vv_u8mf2(vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { + return __riscv_vandn(vs2, vs1, vl); +} + +vuint8mf2_t test_vandn_vx_u8mf2(vuint8mf2_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vandn(vs2, rs1, vl); +} + +vuint8m1_t test_vandn_vv_u8m1(vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vandn(vs2, vs1, vl); +} + +vuint8m1_t test_vandn_vx_u8m1(vuint8m1_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vandn(vs2, rs1, vl); +} + +vuint8m2_t test_vandn_vv_u8m2(vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) { + return __riscv_vandn(vs2, vs1, vl); +} + +vuint8m2_t test_vandn_vx_u8m2(vuint8m2_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vandn(vs2, rs1, vl); +} + +vuint8m4_t test_vandn_vv_u8m4(vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) { + return __riscv_vandn(vs2, vs1, vl); +} + +vuint8m4_t test_vandn_vx_u8m4(vuint8m4_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vandn(vs2, rs1, vl); +} + 
+vuint8m8_t test_vandn_vv_u8m8(vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) { + return __riscv_vandn(vs2, vs1, vl); +} + +vuint8m8_t test_vandn_vx_u8m8(vuint8m8_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vandn(vs2, rs1, vl); +} + +vuint16mf4_t test_vandn_vv_u16mf4(vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { + return __riscv_vandn(vs2, vs1, vl); +} + +vuint16mf4_t test_vandn_vx_u16mf4(vuint16mf4_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vandn(vs2, rs1, vl); +} + +vuint16mf2_t test_vandn_vv_u16mf2(vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { + return __riscv_vandn(vs2, vs1, vl); +} + +vuint16mf2_t test_vandn_vx_u16mf2(vuint16mf2_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vandn(vs2, rs1, vl); +} + +vuint16m1_t test_vandn_vv_u16m1(vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vandn(vs2, vs1, vl); +} + +vuint16m1_t test_vandn_vx_u16m1(vuint16m1_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vandn(vs2, rs1, vl); +} + +vuint16m2_t test_vandn_vv_u16m2(vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { + return __riscv_vandn(vs2, vs1, vl); +} + +vuint16m2_t test_vandn_vx_u16m2(vuint16m2_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vandn(vs2, rs1, vl); +} + +vuint16m4_t test_vandn_vv_u16m4(vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { + return __riscv_vandn(vs2, vs1, vl); +} + +vuint16m4_t test_vandn_vx_u16m4(vuint16m4_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vandn(vs2, rs1, vl); +} + +vuint16m8_t test_vandn_vv_u16m8(vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) { + return __riscv_vandn(vs2, vs1, vl); +} + +vuint16m8_t test_vandn_vx_u16m8(vuint16m8_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vandn(vs2, rs1, vl); +} + +vuint32mf2_t test_vandn_vv_u32mf2(vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { + return __riscv_vandn(vs2, vs1, vl); +} + +vuint32mf2_t test_vandn_vx_u32mf2(vuint32mf2_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vandn(vs2, rs1, vl); +} + +vuint32m1_t test_vandn_vv_u32m1(vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vandn(vs2, vs1, vl); +} + +vuint32m1_t test_vandn_vx_u32m1(vuint32m1_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vandn(vs2, rs1, vl); +} + +vuint32m2_t test_vandn_vv_u32m2(vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { + return __riscv_vandn(vs2, vs1, vl); +} + +vuint32m2_t test_vandn_vx_u32m2(vuint32m2_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vandn(vs2, rs1, vl); +} + +vuint32m4_t test_vandn_vv_u32m4(vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { + return __riscv_vandn(vs2, vs1, vl); +} + +vuint32m4_t test_vandn_vx_u32m4(vuint32m4_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vandn(vs2, rs1, vl); +} + +vuint32m8_t test_vandn_vv_u32m8(vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) { + return __riscv_vandn(vs2, vs1, vl); +} + +vuint32m8_t test_vandn_vx_u32m8(vuint32m8_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vandn(vs2, rs1, vl); +} + +vuint64m1_t test_vandn_vv_u64m1(vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vandn(vs2, vs1, vl); +} + +vuint64m1_t test_vandn_vx_u64m1(vuint64m1_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vandn(vs2, rs1, vl); +} + +vuint64m2_t test_vandn_vv_u64m2(vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { + return __riscv_vandn(vs2, vs1, vl); +} + +vuint64m2_t test_vandn_vx_u64m2(vuint64m2_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vandn(vs2, rs1, vl); +} + +vuint64m4_t test_vandn_vv_u64m4(vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { + return __riscv_vandn(vs2, vs1, vl); +} + +vuint64m4_t 
test_vandn_vx_u64m4(vuint64m4_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vandn(vs2, rs1, vl); +} + +vuint64m8_t test_vandn_vv_u64m8(vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { + return __riscv_vandn(vs2, vs1, vl); +} + +vuint64m8_t test_vandn_vx_u64m8(vuint64m8_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vandn(vs2, rs1, vl); +} + +vuint8mf8_t test_vandn_vv_u8mf8_m(vbool64_t vm, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { + return __riscv_vandn(vm, vs2, vs1, vl); +} + +vuint8mf8_t test_vandn_vx_u8mf8_m(vbool64_t vm, vuint8mf8_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vandn(vm, vs2, rs1, vl); +} + +vuint8mf4_t test_vandn_vv_u8mf4_m(vbool32_t vm, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { + return __riscv_vandn(vm, vs2, vs1, vl); +} + +vuint8mf4_t test_vandn_vx_u8mf4_m(vbool32_t vm, vuint8mf4_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vandn(vm, vs2, rs1, vl); +} + +vuint8mf2_t test_vandn_vv_u8mf2_m(vbool16_t vm, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { + return __riscv_vandn(vm, vs2, vs1, vl); +} + +vuint8mf2_t test_vandn_vx_u8mf2_m(vbool16_t vm, vuint8mf2_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vandn(vm, vs2, rs1, vl); +} + +vuint8m1_t test_vandn_vv_u8m1_m(vbool8_t vm, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vandn(vm, vs2, vs1, vl); +} + +vuint8m1_t test_vandn_vx_u8m1_m(vbool8_t vm, vuint8m1_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vandn(vm, vs2, rs1, vl); +} + +vuint8m2_t test_vandn_vv_u8m2_m(vbool4_t vm, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) { + return __riscv_vandn(vm, vs2, vs1, vl); +} + +vuint8m2_t test_vandn_vx_u8m2_m(vbool4_t vm, vuint8m2_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vandn(vm, vs2, rs1, vl); +} + +vuint8m4_t test_vandn_vv_u8m4_m(vbool2_t vm, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) { + return __riscv_vandn(vm, vs2, vs1, vl); +} + +vuint8m4_t test_vandn_vx_u8m4_m(vbool2_t vm, vuint8m4_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vandn(vm, vs2, rs1, vl); +} + +vuint8m8_t test_vandn_vv_u8m8_m(vbool1_t vm, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) { + return __riscv_vandn(vm, vs2, vs1, vl); +} + +vuint8m8_t test_vandn_vx_u8m8_m(vbool1_t vm, vuint8m8_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vandn(vm, vs2, rs1, vl); +} + +vuint16mf4_t test_vandn_vv_u16mf4_m(vbool64_t vm, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { + return __riscv_vandn(vm, vs2, vs1, vl); +} + +vuint16mf4_t test_vandn_vx_u16mf4_m(vbool64_t vm, vuint16mf4_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vandn(vm, vs2, rs1, vl); +} + +vuint16mf2_t test_vandn_vv_u16mf2_m(vbool32_t vm, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { + return __riscv_vandn(vm, vs2, vs1, vl); +} + +vuint16mf2_t test_vandn_vx_u16mf2_m(vbool32_t vm, vuint16mf2_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vandn(vm, vs2, rs1, vl); +} + +vuint16m1_t test_vandn_vv_u16m1_m(vbool16_t vm, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vandn(vm, vs2, vs1, vl); +} + +vuint16m1_t test_vandn_vx_u16m1_m(vbool16_t vm, vuint16m1_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vandn(vm, vs2, rs1, vl); +} + +vuint16m2_t test_vandn_vv_u16m2_m(vbool8_t vm, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { + return __riscv_vandn(vm, vs2, vs1, vl); +} + +vuint16m2_t test_vandn_vx_u16m2_m(vbool8_t vm, vuint16m2_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vandn(vm, vs2, rs1, vl); +} + +vuint16m4_t test_vandn_vv_u16m4_m(vbool4_t vm, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { + return __riscv_vandn(vm, 
vs2, vs1, vl); +} + +vuint16m4_t test_vandn_vx_u16m4_m(vbool4_t vm, vuint16m4_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vandn(vm, vs2, rs1, vl); +} + +vuint16m8_t test_vandn_vv_u16m8_m(vbool2_t vm, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) { + return __riscv_vandn(vm, vs2, vs1, vl); +} + +vuint16m8_t test_vandn_vx_u16m8_m(vbool2_t vm, vuint16m8_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vandn(vm, vs2, rs1, vl); +} + +vuint32mf2_t test_vandn_vv_u32mf2_m(vbool64_t vm, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { + return __riscv_vandn(vm, vs2, vs1, vl); +} + +vuint32mf2_t test_vandn_vx_u32mf2_m(vbool64_t vm, vuint32mf2_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vandn(vm, vs2, rs1, vl); +} + +vuint32m1_t test_vandn_vv_u32m1_m(vbool32_t vm, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vandn(vm, vs2, vs1, vl); +} + +vuint32m1_t test_vandn_vx_u32m1_m(vbool32_t vm, vuint32m1_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vandn(vm, vs2, rs1, vl); +} + +vuint32m2_t test_vandn_vv_u32m2_m(vbool16_t vm, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { + return __riscv_vandn(vm, vs2, vs1, vl); +} + +vuint32m2_t test_vandn_vx_u32m2_m(vbool16_t vm, vuint32m2_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vandn(vm, vs2, rs1, vl); +} + +vuint32m4_t test_vandn_vv_u32m4_m(vbool8_t vm, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { + return __riscv_vandn(vm, vs2, vs1, vl); +} + +vuint32m4_t test_vandn_vx_u32m4_m(vbool8_t vm, vuint32m4_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vandn(vm, vs2, rs1, vl); +} + +vuint32m8_t test_vandn_vv_u32m8_m(vbool4_t vm, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) { + return __riscv_vandn(vm, vs2, vs1, vl); +} + +vuint32m8_t test_vandn_vx_u32m8_m(vbool4_t vm, vuint32m8_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vandn(vm, vs2, rs1, vl); +} + +vuint64m1_t test_vandn_vv_u64m1_m(vbool64_t vm, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vandn(vm, vs2, vs1, vl); +} + +vuint64m1_t test_vandn_vx_u64m1_m(vbool64_t vm, vuint64m1_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vandn(vm, vs2, rs1, vl); +} + +vuint64m2_t test_vandn_vv_u64m2_m(vbool32_t vm, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { + return __riscv_vandn(vm, vs2, vs1, vl); +} + +vuint64m2_t test_vandn_vx_u64m2_m(vbool32_t vm, vuint64m2_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vandn(vm, vs2, rs1, vl); +} + +vuint64m4_t test_vandn_vv_u64m4_m(vbool16_t vm, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { + return __riscv_vandn(vm, vs2, vs1, vl); +} + +vuint64m4_t test_vandn_vx_u64m4_m(vbool16_t vm, vuint64m4_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vandn(vm, vs2, rs1, vl); +} + +vuint64m8_t test_vandn_vv_u64m8_m(vbool8_t vm, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { + return __riscv_vandn(vm, vs2, vs1, vl); +} + +vuint64m8_t test_vandn_vx_u64m8_m(vbool8_t vm, vuint64m8_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vandn(vm, vs2, rs1, vl); +} diff --git a/auto-generated/vector-crypto/overloaded-api-testing/vbrev.c b/auto-generated/vector-crypto/overloaded-api-testing/vbrev.c new file mode 100644 index 000000000..5a27daa73 --- /dev/null +++ b/auto-generated/vector-crypto/overloaded-api-testing/vbrev.c @@ -0,0 +1,178 @@ +#include +#include + +vuint8mf8_t test_vbrev_v_u8mf8(vuint8mf8_t vs2, size_t vl) { + return __riscv_vbrev(vs2, vl); +} + +vuint8mf4_t test_vbrev_v_u8mf4(vuint8mf4_t vs2, size_t vl) { + return __riscv_vbrev(vs2, vl); +} + +vuint8mf2_t test_vbrev_v_u8mf2(vuint8mf2_t vs2, size_t vl) { + 
return __riscv_vbrev(vs2, vl); +} + +vuint8m1_t test_vbrev_v_u8m1(vuint8m1_t vs2, size_t vl) { + return __riscv_vbrev(vs2, vl); +} + +vuint8m2_t test_vbrev_v_u8m2(vuint8m2_t vs2, size_t vl) { + return __riscv_vbrev(vs2, vl); +} + +vuint8m4_t test_vbrev_v_u8m4(vuint8m4_t vs2, size_t vl) { + return __riscv_vbrev(vs2, vl); +} + +vuint8m8_t test_vbrev_v_u8m8(vuint8m8_t vs2, size_t vl) { + return __riscv_vbrev(vs2, vl); +} + +vuint16mf4_t test_vbrev_v_u16mf4(vuint16mf4_t vs2, size_t vl) { + return __riscv_vbrev(vs2, vl); +} + +vuint16mf2_t test_vbrev_v_u16mf2(vuint16mf2_t vs2, size_t vl) { + return __riscv_vbrev(vs2, vl); +} + +vuint16m1_t test_vbrev_v_u16m1(vuint16m1_t vs2, size_t vl) { + return __riscv_vbrev(vs2, vl); +} + +vuint16m2_t test_vbrev_v_u16m2(vuint16m2_t vs2, size_t vl) { + return __riscv_vbrev(vs2, vl); +} + +vuint16m4_t test_vbrev_v_u16m4(vuint16m4_t vs2, size_t vl) { + return __riscv_vbrev(vs2, vl); +} + +vuint16m8_t test_vbrev_v_u16m8(vuint16m8_t vs2, size_t vl) { + return __riscv_vbrev(vs2, vl); +} + +vuint32mf2_t test_vbrev_v_u32mf2(vuint32mf2_t vs2, size_t vl) { + return __riscv_vbrev(vs2, vl); +} + +vuint32m1_t test_vbrev_v_u32m1(vuint32m1_t vs2, size_t vl) { + return __riscv_vbrev(vs2, vl); +} + +vuint32m2_t test_vbrev_v_u32m2(vuint32m2_t vs2, size_t vl) { + return __riscv_vbrev(vs2, vl); +} + +vuint32m4_t test_vbrev_v_u32m4(vuint32m4_t vs2, size_t vl) { + return __riscv_vbrev(vs2, vl); +} + +vuint32m8_t test_vbrev_v_u32m8(vuint32m8_t vs2, size_t vl) { + return __riscv_vbrev(vs2, vl); +} + +vuint64m1_t test_vbrev_v_u64m1(vuint64m1_t vs2, size_t vl) { + return __riscv_vbrev(vs2, vl); +} + +vuint64m2_t test_vbrev_v_u64m2(vuint64m2_t vs2, size_t vl) { + return __riscv_vbrev(vs2, vl); +} + +vuint64m4_t test_vbrev_v_u64m4(vuint64m4_t vs2, size_t vl) { + return __riscv_vbrev(vs2, vl); +} + +vuint64m8_t test_vbrev_v_u64m8(vuint64m8_t vs2, size_t vl) { + return __riscv_vbrev(vs2, vl); +} + +vuint8mf8_t test_vbrev_v_u8mf8_m(vbool64_t vm, vuint8mf8_t vs2, size_t vl) { + return __riscv_vbrev(vm, vs2, vl); +} + +vuint8mf4_t test_vbrev_v_u8mf4_m(vbool32_t vm, vuint8mf4_t vs2, size_t vl) { + return __riscv_vbrev(vm, vs2, vl); +} + +vuint8mf2_t test_vbrev_v_u8mf2_m(vbool16_t vm, vuint8mf2_t vs2, size_t vl) { + return __riscv_vbrev(vm, vs2, vl); +} + +vuint8m1_t test_vbrev_v_u8m1_m(vbool8_t vm, vuint8m1_t vs2, size_t vl) { + return __riscv_vbrev(vm, vs2, vl); +} + +vuint8m2_t test_vbrev_v_u8m2_m(vbool4_t vm, vuint8m2_t vs2, size_t vl) { + return __riscv_vbrev(vm, vs2, vl); +} + +vuint8m4_t test_vbrev_v_u8m4_m(vbool2_t vm, vuint8m4_t vs2, size_t vl) { + return __riscv_vbrev(vm, vs2, vl); +} + +vuint8m8_t test_vbrev_v_u8m8_m(vbool1_t vm, vuint8m8_t vs2, size_t vl) { + return __riscv_vbrev(vm, vs2, vl); +} + +vuint16mf4_t test_vbrev_v_u16mf4_m(vbool64_t vm, vuint16mf4_t vs2, size_t vl) { + return __riscv_vbrev(vm, vs2, vl); +} + +vuint16mf2_t test_vbrev_v_u16mf2_m(vbool32_t vm, vuint16mf2_t vs2, size_t vl) { + return __riscv_vbrev(vm, vs2, vl); +} + +vuint16m1_t test_vbrev_v_u16m1_m(vbool16_t vm, vuint16m1_t vs2, size_t vl) { + return __riscv_vbrev(vm, vs2, vl); +} + +vuint16m2_t test_vbrev_v_u16m2_m(vbool8_t vm, vuint16m2_t vs2, size_t vl) { + return __riscv_vbrev(vm, vs2, vl); +} + +vuint16m4_t test_vbrev_v_u16m4_m(vbool4_t vm, vuint16m4_t vs2, size_t vl) { + return __riscv_vbrev(vm, vs2, vl); +} + +vuint16m8_t test_vbrev_v_u16m8_m(vbool2_t vm, vuint16m8_t vs2, size_t vl) { + return __riscv_vbrev(vm, vs2, vl); +} + +vuint32mf2_t test_vbrev_v_u32mf2_m(vbool64_t vm, vuint32mf2_t vs2, size_t 
vl) { + return __riscv_vbrev(vm, vs2, vl); +} + +vuint32m1_t test_vbrev_v_u32m1_m(vbool32_t vm, vuint32m1_t vs2, size_t vl) { + return __riscv_vbrev(vm, vs2, vl); +} + +vuint32m2_t test_vbrev_v_u32m2_m(vbool16_t vm, vuint32m2_t vs2, size_t vl) { + return __riscv_vbrev(vm, vs2, vl); +} + +vuint32m4_t test_vbrev_v_u32m4_m(vbool8_t vm, vuint32m4_t vs2, size_t vl) { + return __riscv_vbrev(vm, vs2, vl); +} + +vuint32m8_t test_vbrev_v_u32m8_m(vbool4_t vm, vuint32m8_t vs2, size_t vl) { + return __riscv_vbrev(vm, vs2, vl); +} + +vuint64m1_t test_vbrev_v_u64m1_m(vbool64_t vm, vuint64m1_t vs2, size_t vl) { + return __riscv_vbrev(vm, vs2, vl); +} + +vuint64m2_t test_vbrev_v_u64m2_m(vbool32_t vm, vuint64m2_t vs2, size_t vl) { + return __riscv_vbrev(vm, vs2, vl); +} + +vuint64m4_t test_vbrev_v_u64m4_m(vbool16_t vm, vuint64m4_t vs2, size_t vl) { + return __riscv_vbrev(vm, vs2, vl); +} + +vuint64m8_t test_vbrev_v_u64m8_m(vbool8_t vm, vuint64m8_t vs2, size_t vl) { + return __riscv_vbrev(vm, vs2, vl); +} diff --git a/auto-generated/vector-crypto/overloaded-api-testing/vbrev8.c b/auto-generated/vector-crypto/overloaded-api-testing/vbrev8.c new file mode 100644 index 000000000..9d0d77b91 --- /dev/null +++ b/auto-generated/vector-crypto/overloaded-api-testing/vbrev8.c @@ -0,0 +1,178 @@ +#include +#include + +vuint8mf8_t test_vbrev8_v_u8mf8(vuint8mf8_t vs2, size_t vl) { + return __riscv_vbrev8(vs2, vl); +} + +vuint8mf4_t test_vbrev8_v_u8mf4(vuint8mf4_t vs2, size_t vl) { + return __riscv_vbrev8(vs2, vl); +} + +vuint8mf2_t test_vbrev8_v_u8mf2(vuint8mf2_t vs2, size_t vl) { + return __riscv_vbrev8(vs2, vl); +} + +vuint8m1_t test_vbrev8_v_u8m1(vuint8m1_t vs2, size_t vl) { + return __riscv_vbrev8(vs2, vl); +} + +vuint8m2_t test_vbrev8_v_u8m2(vuint8m2_t vs2, size_t vl) { + return __riscv_vbrev8(vs2, vl); +} + +vuint8m4_t test_vbrev8_v_u8m4(vuint8m4_t vs2, size_t vl) { + return __riscv_vbrev8(vs2, vl); +} + +vuint8m8_t test_vbrev8_v_u8m8(vuint8m8_t vs2, size_t vl) { + return __riscv_vbrev8(vs2, vl); +} + +vuint16mf4_t test_vbrev8_v_u16mf4(vuint16mf4_t vs2, size_t vl) { + return __riscv_vbrev8(vs2, vl); +} + +vuint16mf2_t test_vbrev8_v_u16mf2(vuint16mf2_t vs2, size_t vl) { + return __riscv_vbrev8(vs2, vl); +} + +vuint16m1_t test_vbrev8_v_u16m1(vuint16m1_t vs2, size_t vl) { + return __riscv_vbrev8(vs2, vl); +} + +vuint16m2_t test_vbrev8_v_u16m2(vuint16m2_t vs2, size_t vl) { + return __riscv_vbrev8(vs2, vl); +} + +vuint16m4_t test_vbrev8_v_u16m4(vuint16m4_t vs2, size_t vl) { + return __riscv_vbrev8(vs2, vl); +} + +vuint16m8_t test_vbrev8_v_u16m8(vuint16m8_t vs2, size_t vl) { + return __riscv_vbrev8(vs2, vl); +} + +vuint32mf2_t test_vbrev8_v_u32mf2(vuint32mf2_t vs2, size_t vl) { + return __riscv_vbrev8(vs2, vl); +} + +vuint32m1_t test_vbrev8_v_u32m1(vuint32m1_t vs2, size_t vl) { + return __riscv_vbrev8(vs2, vl); +} + +vuint32m2_t test_vbrev8_v_u32m2(vuint32m2_t vs2, size_t vl) { + return __riscv_vbrev8(vs2, vl); +} + +vuint32m4_t test_vbrev8_v_u32m4(vuint32m4_t vs2, size_t vl) { + return __riscv_vbrev8(vs2, vl); +} + +vuint32m8_t test_vbrev8_v_u32m8(vuint32m8_t vs2, size_t vl) { + return __riscv_vbrev8(vs2, vl); +} + +vuint64m1_t test_vbrev8_v_u64m1(vuint64m1_t vs2, size_t vl) { + return __riscv_vbrev8(vs2, vl); +} + +vuint64m2_t test_vbrev8_v_u64m2(vuint64m2_t vs2, size_t vl) { + return __riscv_vbrev8(vs2, vl); +} + +vuint64m4_t test_vbrev8_v_u64m4(vuint64m4_t vs2, size_t vl) { + return __riscv_vbrev8(vs2, vl); +} + +vuint64m8_t test_vbrev8_v_u64m8(vuint64m8_t vs2, size_t vl) { + return __riscv_vbrev8(vs2, vl); +} + 
+vuint8mf8_t test_vbrev8_v_u8mf8_m(vbool64_t vm, vuint8mf8_t vs2, size_t vl) { + return __riscv_vbrev8(vm, vs2, vl); +} + +vuint8mf4_t test_vbrev8_v_u8mf4_m(vbool32_t vm, vuint8mf4_t vs2, size_t vl) { + return __riscv_vbrev8(vm, vs2, vl); +} + +vuint8mf2_t test_vbrev8_v_u8mf2_m(vbool16_t vm, vuint8mf2_t vs2, size_t vl) { + return __riscv_vbrev8(vm, vs2, vl); +} + +vuint8m1_t test_vbrev8_v_u8m1_m(vbool8_t vm, vuint8m1_t vs2, size_t vl) { + return __riscv_vbrev8(vm, vs2, vl); +} + +vuint8m2_t test_vbrev8_v_u8m2_m(vbool4_t vm, vuint8m2_t vs2, size_t vl) { + return __riscv_vbrev8(vm, vs2, vl); +} + +vuint8m4_t test_vbrev8_v_u8m4_m(vbool2_t vm, vuint8m4_t vs2, size_t vl) { + return __riscv_vbrev8(vm, vs2, vl); +} + +vuint8m8_t test_vbrev8_v_u8m8_m(vbool1_t vm, vuint8m8_t vs2, size_t vl) { + return __riscv_vbrev8(vm, vs2, vl); +} + +vuint16mf4_t test_vbrev8_v_u16mf4_m(vbool64_t vm, vuint16mf4_t vs2, size_t vl) { + return __riscv_vbrev8(vm, vs2, vl); +} + +vuint16mf2_t test_vbrev8_v_u16mf2_m(vbool32_t vm, vuint16mf2_t vs2, size_t vl) { + return __riscv_vbrev8(vm, vs2, vl); +} + +vuint16m1_t test_vbrev8_v_u16m1_m(vbool16_t vm, vuint16m1_t vs2, size_t vl) { + return __riscv_vbrev8(vm, vs2, vl); +} + +vuint16m2_t test_vbrev8_v_u16m2_m(vbool8_t vm, vuint16m2_t vs2, size_t vl) { + return __riscv_vbrev8(vm, vs2, vl); +} + +vuint16m4_t test_vbrev8_v_u16m4_m(vbool4_t vm, vuint16m4_t vs2, size_t vl) { + return __riscv_vbrev8(vm, vs2, vl); +} + +vuint16m8_t test_vbrev8_v_u16m8_m(vbool2_t vm, vuint16m8_t vs2, size_t vl) { + return __riscv_vbrev8(vm, vs2, vl); +} + +vuint32mf2_t test_vbrev8_v_u32mf2_m(vbool64_t vm, vuint32mf2_t vs2, size_t vl) { + return __riscv_vbrev8(vm, vs2, vl); +} + +vuint32m1_t test_vbrev8_v_u32m1_m(vbool32_t vm, vuint32m1_t vs2, size_t vl) { + return __riscv_vbrev8(vm, vs2, vl); +} + +vuint32m2_t test_vbrev8_v_u32m2_m(vbool16_t vm, vuint32m2_t vs2, size_t vl) { + return __riscv_vbrev8(vm, vs2, vl); +} + +vuint32m4_t test_vbrev8_v_u32m4_m(vbool8_t vm, vuint32m4_t vs2, size_t vl) { + return __riscv_vbrev8(vm, vs2, vl); +} + +vuint32m8_t test_vbrev8_v_u32m8_m(vbool4_t vm, vuint32m8_t vs2, size_t vl) { + return __riscv_vbrev8(vm, vs2, vl); +} + +vuint64m1_t test_vbrev8_v_u64m1_m(vbool64_t vm, vuint64m1_t vs2, size_t vl) { + return __riscv_vbrev8(vm, vs2, vl); +} + +vuint64m2_t test_vbrev8_v_u64m2_m(vbool32_t vm, vuint64m2_t vs2, size_t vl) { + return __riscv_vbrev8(vm, vs2, vl); +} + +vuint64m4_t test_vbrev8_v_u64m4_m(vbool16_t vm, vuint64m4_t vs2, size_t vl) { + return __riscv_vbrev8(vm, vs2, vl); +} + +vuint64m8_t test_vbrev8_v_u64m8_m(vbool8_t vm, vuint64m8_t vs2, size_t vl) { + return __riscv_vbrev8(vm, vs2, vl); +} diff --git a/auto-generated/vector-crypto/overloaded-api-testing/vclmul.c b/auto-generated/vector-crypto/overloaded-api-testing/vclmul.c new file mode 100644 index 000000000..cf48adf9c --- /dev/null +++ b/auto-generated/vector-crypto/overloaded-api-testing/vclmul.c @@ -0,0 +1,66 @@ +#include +#include + +vuint64m1_t test_vclmul_vv_u64m1(vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vclmul(vs2, vs1, vl); +} + +vuint64m1_t test_vclmul_vx_u64m1(vuint64m1_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vclmul(vs2, rs1, vl); +} + +vuint64m2_t test_vclmul_vv_u64m2(vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { + return __riscv_vclmul(vs2, vs1, vl); +} + +vuint64m2_t test_vclmul_vx_u64m2(vuint64m2_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vclmul(vs2, rs1, vl); +} + +vuint64m4_t test_vclmul_vv_u64m4(vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { + 
return __riscv_vclmul(vs2, vs1, vl); +} + +vuint64m4_t test_vclmul_vx_u64m4(vuint64m4_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vclmul(vs2, rs1, vl); +} + +vuint64m8_t test_vclmul_vv_u64m8(vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { + return __riscv_vclmul(vs2, vs1, vl); +} + +vuint64m8_t test_vclmul_vx_u64m8(vuint64m8_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vclmul(vs2, rs1, vl); +} + +vuint64m1_t test_vclmul_vv_u64m1_m(vbool64_t vm, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vclmul(vm, vs2, vs1, vl); +} + +vuint64m1_t test_vclmul_vx_u64m1_m(vbool64_t vm, vuint64m1_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vclmul(vm, vs2, rs1, vl); +} + +vuint64m2_t test_vclmul_vv_u64m2_m(vbool32_t vm, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { + return __riscv_vclmul(vm, vs2, vs1, vl); +} + +vuint64m2_t test_vclmul_vx_u64m2_m(vbool32_t vm, vuint64m2_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vclmul(vm, vs2, rs1, vl); +} + +vuint64m4_t test_vclmul_vv_u64m4_m(vbool16_t vm, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { + return __riscv_vclmul(vm, vs2, vs1, vl); +} + +vuint64m4_t test_vclmul_vx_u64m4_m(vbool16_t vm, vuint64m4_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vclmul(vm, vs2, rs1, vl); +} + +vuint64m8_t test_vclmul_vv_u64m8_m(vbool8_t vm, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { + return __riscv_vclmul(vm, vs2, vs1, vl); +} + +vuint64m8_t test_vclmul_vx_u64m8_m(vbool8_t vm, vuint64m8_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vclmul(vm, vs2, rs1, vl); +} diff --git a/auto-generated/vector-crypto/overloaded-api-testing/vclmulh.c b/auto-generated/vector-crypto/overloaded-api-testing/vclmulh.c new file mode 100644 index 000000000..7000a93e5 --- /dev/null +++ b/auto-generated/vector-crypto/overloaded-api-testing/vclmulh.c @@ -0,0 +1,66 @@ +#include +#include + +vuint64m1_t test_vclmulh_vv_u64m1(vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vclmulh(vs2, vs1, vl); +} + +vuint64m1_t test_vclmulh_vx_u64m1(vuint64m1_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vclmulh(vs2, rs1, vl); +} + +vuint64m2_t test_vclmulh_vv_u64m2(vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { + return __riscv_vclmulh(vs2, vs1, vl); +} + +vuint64m2_t test_vclmulh_vx_u64m2(vuint64m2_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vclmulh(vs2, rs1, vl); +} + +vuint64m4_t test_vclmulh_vv_u64m4(vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { + return __riscv_vclmulh(vs2, vs1, vl); +} + +vuint64m4_t test_vclmulh_vx_u64m4(vuint64m4_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vclmulh(vs2, rs1, vl); +} + +vuint64m8_t test_vclmulh_vv_u64m8(vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { + return __riscv_vclmulh(vs2, vs1, vl); +} + +vuint64m8_t test_vclmulh_vx_u64m8(vuint64m8_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vclmulh(vs2, rs1, vl); +} + +vuint64m1_t test_vclmulh_vv_u64m1_m(vbool64_t vm, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vclmulh(vm, vs2, vs1, vl); +} + +vuint64m1_t test_vclmulh_vx_u64m1_m(vbool64_t vm, vuint64m1_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vclmulh(vm, vs2, rs1, vl); +} + +vuint64m2_t test_vclmulh_vv_u64m2_m(vbool32_t vm, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { + return __riscv_vclmulh(vm, vs2, vs1, vl); +} + +vuint64m2_t test_vclmulh_vx_u64m2_m(vbool32_t vm, vuint64m2_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vclmulh(vm, vs2, rs1, vl); +} + +vuint64m4_t test_vclmulh_vv_u64m4_m(vbool16_t vm, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { + 
return __riscv_vclmulh(vm, vs2, vs1, vl); +} + +vuint64m4_t test_vclmulh_vx_u64m4_m(vbool16_t vm, vuint64m4_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vclmulh(vm, vs2, rs1, vl); +} + +vuint64m8_t test_vclmulh_vv_u64m8_m(vbool8_t vm, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { + return __riscv_vclmulh(vm, vs2, vs1, vl); +} + +vuint64m8_t test_vclmulh_vx_u64m8_m(vbool8_t vm, vuint64m8_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vclmulh(vm, vs2, rs1, vl); +} diff --git a/auto-generated/vector-crypto/overloaded-api-testing/vclz.c b/auto-generated/vector-crypto/overloaded-api-testing/vclz.c new file mode 100644 index 000000000..d93faf0f3 --- /dev/null +++ b/auto-generated/vector-crypto/overloaded-api-testing/vclz.c @@ -0,0 +1,178 @@ +#include +#include + +vuint8mf8_t test_vclz_v_u8mf8(vuint8mf8_t vs2, size_t vl) { + return __riscv_vclz(vs2, vl); +} + +vuint8mf4_t test_vclz_v_u8mf4(vuint8mf4_t vs2, size_t vl) { + return __riscv_vclz(vs2, vl); +} + +vuint8mf2_t test_vclz_v_u8mf2(vuint8mf2_t vs2, size_t vl) { + return __riscv_vclz(vs2, vl); +} + +vuint8m1_t test_vclz_v_u8m1(vuint8m1_t vs2, size_t vl) { + return __riscv_vclz(vs2, vl); +} + +vuint8m2_t test_vclz_v_u8m2(vuint8m2_t vs2, size_t vl) { + return __riscv_vclz(vs2, vl); +} + +vuint8m4_t test_vclz_v_u8m4(vuint8m4_t vs2, size_t vl) { + return __riscv_vclz(vs2, vl); +} + +vuint8m8_t test_vclz_v_u8m8(vuint8m8_t vs2, size_t vl) { + return __riscv_vclz(vs2, vl); +} + +vuint16mf4_t test_vclz_v_u16mf4(vuint16mf4_t vs2, size_t vl) { + return __riscv_vclz(vs2, vl); +} + +vuint16mf2_t test_vclz_v_u16mf2(vuint16mf2_t vs2, size_t vl) { + return __riscv_vclz(vs2, vl); +} + +vuint16m1_t test_vclz_v_u16m1(vuint16m1_t vs2, size_t vl) { + return __riscv_vclz(vs2, vl); +} + +vuint16m2_t test_vclz_v_u16m2(vuint16m2_t vs2, size_t vl) { + return __riscv_vclz(vs2, vl); +} + +vuint16m4_t test_vclz_v_u16m4(vuint16m4_t vs2, size_t vl) { + return __riscv_vclz(vs2, vl); +} + +vuint16m8_t test_vclz_v_u16m8(vuint16m8_t vs2, size_t vl) { + return __riscv_vclz(vs2, vl); +} + +vuint32mf2_t test_vclz_v_u32mf2(vuint32mf2_t vs2, size_t vl) { + return __riscv_vclz(vs2, vl); +} + +vuint32m1_t test_vclz_v_u32m1(vuint32m1_t vs2, size_t vl) { + return __riscv_vclz(vs2, vl); +} + +vuint32m2_t test_vclz_v_u32m2(vuint32m2_t vs2, size_t vl) { + return __riscv_vclz(vs2, vl); +} + +vuint32m4_t test_vclz_v_u32m4(vuint32m4_t vs2, size_t vl) { + return __riscv_vclz(vs2, vl); +} + +vuint32m8_t test_vclz_v_u32m8(vuint32m8_t vs2, size_t vl) { + return __riscv_vclz(vs2, vl); +} + +vuint64m1_t test_vclz_v_u64m1(vuint64m1_t vs2, size_t vl) { + return __riscv_vclz(vs2, vl); +} + +vuint64m2_t test_vclz_v_u64m2(vuint64m2_t vs2, size_t vl) { + return __riscv_vclz(vs2, vl); +} + +vuint64m4_t test_vclz_v_u64m4(vuint64m4_t vs2, size_t vl) { + return __riscv_vclz(vs2, vl); +} + +vuint64m8_t test_vclz_v_u64m8(vuint64m8_t vs2, size_t vl) { + return __riscv_vclz(vs2, vl); +} + +vuint8mf8_t test_vclz_v_u8mf8_m(vbool64_t vm, vuint8mf8_t vs2, size_t vl) { + return __riscv_vclz(vm, vs2, vl); +} + +vuint8mf4_t test_vclz_v_u8mf4_m(vbool32_t vm, vuint8mf4_t vs2, size_t vl) { + return __riscv_vclz(vm, vs2, vl); +} + +vuint8mf2_t test_vclz_v_u8mf2_m(vbool16_t vm, vuint8mf2_t vs2, size_t vl) { + return __riscv_vclz(vm, vs2, vl); +} + +vuint8m1_t test_vclz_v_u8m1_m(vbool8_t vm, vuint8m1_t vs2, size_t vl) { + return __riscv_vclz(vm, vs2, vl); +} + +vuint8m2_t test_vclz_v_u8m2_m(vbool4_t vm, vuint8m2_t vs2, size_t vl) { + return __riscv_vclz(vm, vs2, vl); +} + +vuint8m4_t test_vclz_v_u8m4_m(vbool2_t 
vm, vuint8m4_t vs2, size_t vl) { + return __riscv_vclz(vm, vs2, vl); +} + +vuint8m8_t test_vclz_v_u8m8_m(vbool1_t vm, vuint8m8_t vs2, size_t vl) { + return __riscv_vclz(vm, vs2, vl); +} + +vuint16mf4_t test_vclz_v_u16mf4_m(vbool64_t vm, vuint16mf4_t vs2, size_t vl) { + return __riscv_vclz(vm, vs2, vl); +} + +vuint16mf2_t test_vclz_v_u16mf2_m(vbool32_t vm, vuint16mf2_t vs2, size_t vl) { + return __riscv_vclz(vm, vs2, vl); +} + +vuint16m1_t test_vclz_v_u16m1_m(vbool16_t vm, vuint16m1_t vs2, size_t vl) { + return __riscv_vclz(vm, vs2, vl); +} + +vuint16m2_t test_vclz_v_u16m2_m(vbool8_t vm, vuint16m2_t vs2, size_t vl) { + return __riscv_vclz(vm, vs2, vl); +} + +vuint16m4_t test_vclz_v_u16m4_m(vbool4_t vm, vuint16m4_t vs2, size_t vl) { + return __riscv_vclz(vm, vs2, vl); +} + +vuint16m8_t test_vclz_v_u16m8_m(vbool2_t vm, vuint16m8_t vs2, size_t vl) { + return __riscv_vclz(vm, vs2, vl); +} + +vuint32mf2_t test_vclz_v_u32mf2_m(vbool64_t vm, vuint32mf2_t vs2, size_t vl) { + return __riscv_vclz(vm, vs2, vl); +} + +vuint32m1_t test_vclz_v_u32m1_m(vbool32_t vm, vuint32m1_t vs2, size_t vl) { + return __riscv_vclz(vm, vs2, vl); +} + +vuint32m2_t test_vclz_v_u32m2_m(vbool16_t vm, vuint32m2_t vs2, size_t vl) { + return __riscv_vclz(vm, vs2, vl); +} + +vuint32m4_t test_vclz_v_u32m4_m(vbool8_t vm, vuint32m4_t vs2, size_t vl) { + return __riscv_vclz(vm, vs2, vl); +} + +vuint32m8_t test_vclz_v_u32m8_m(vbool4_t vm, vuint32m8_t vs2, size_t vl) { + return __riscv_vclz(vm, vs2, vl); +} + +vuint64m1_t test_vclz_v_u64m1_m(vbool64_t vm, vuint64m1_t vs2, size_t vl) { + return __riscv_vclz(vm, vs2, vl); +} + +vuint64m2_t test_vclz_v_u64m2_m(vbool32_t vm, vuint64m2_t vs2, size_t vl) { + return __riscv_vclz(vm, vs2, vl); +} + +vuint64m4_t test_vclz_v_u64m4_m(vbool16_t vm, vuint64m4_t vs2, size_t vl) { + return __riscv_vclz(vm, vs2, vl); +} + +vuint64m8_t test_vclz_v_u64m8_m(vbool8_t vm, vuint64m8_t vs2, size_t vl) { + return __riscv_vclz(vm, vs2, vl); +} diff --git a/auto-generated/vector-crypto/overloaded-api-testing/vcpop.c b/auto-generated/vector-crypto/overloaded-api-testing/vcpop.c new file mode 100644 index 000000000..cf5ec1edd --- /dev/null +++ b/auto-generated/vector-crypto/overloaded-api-testing/vcpop.c @@ -0,0 +1,178 @@ +#include +#include + +vuint8mf8_t test_vcpop_v_u8mf8(vuint8mf8_t vs2, size_t vl) { + return __riscv_vcpop(vs2, vl); +} + +vuint8mf4_t test_vcpop_v_u8mf4(vuint8mf4_t vs2, size_t vl) { + return __riscv_vcpop(vs2, vl); +} + +vuint8mf2_t test_vcpop_v_u8mf2(vuint8mf2_t vs2, size_t vl) { + return __riscv_vcpop(vs2, vl); +} + +vuint8m1_t test_vcpop_v_u8m1(vuint8m1_t vs2, size_t vl) { + return __riscv_vcpop(vs2, vl); +} + +vuint8m2_t test_vcpop_v_u8m2(vuint8m2_t vs2, size_t vl) { + return __riscv_vcpop(vs2, vl); +} + +vuint8m4_t test_vcpop_v_u8m4(vuint8m4_t vs2, size_t vl) { + return __riscv_vcpop(vs2, vl); +} + +vuint8m8_t test_vcpop_v_u8m8(vuint8m8_t vs2, size_t vl) { + return __riscv_vcpop(vs2, vl); +} + +vuint16mf4_t test_vcpop_v_u16mf4(vuint16mf4_t vs2, size_t vl) { + return __riscv_vcpop(vs2, vl); +} + +vuint16mf2_t test_vcpop_v_u16mf2(vuint16mf2_t vs2, size_t vl) { + return __riscv_vcpop(vs2, vl); +} + +vuint16m1_t test_vcpop_v_u16m1(vuint16m1_t vs2, size_t vl) { + return __riscv_vcpop(vs2, vl); +} + +vuint16m2_t test_vcpop_v_u16m2(vuint16m2_t vs2, size_t vl) { + return __riscv_vcpop(vs2, vl); +} + +vuint16m4_t test_vcpop_v_u16m4(vuint16m4_t vs2, size_t vl) { + return __riscv_vcpop(vs2, vl); +} + +vuint16m8_t test_vcpop_v_u16m8(vuint16m8_t vs2, size_t vl) { + return __riscv_vcpop(vs2, vl); +} 
+ +vuint32mf2_t test_vcpop_v_u32mf2(vuint32mf2_t vs2, size_t vl) { + return __riscv_vcpop(vs2, vl); +} + +vuint32m1_t test_vcpop_v_u32m1(vuint32m1_t vs2, size_t vl) { + return __riscv_vcpop(vs2, vl); +} + +vuint32m2_t test_vcpop_v_u32m2(vuint32m2_t vs2, size_t vl) { + return __riscv_vcpop(vs2, vl); +} + +vuint32m4_t test_vcpop_v_u32m4(vuint32m4_t vs2, size_t vl) { + return __riscv_vcpop(vs2, vl); +} + +vuint32m8_t test_vcpop_v_u32m8(vuint32m8_t vs2, size_t vl) { + return __riscv_vcpop(vs2, vl); +} + +vuint64m1_t test_vcpop_v_u64m1(vuint64m1_t vs2, size_t vl) { + return __riscv_vcpop(vs2, vl); +} + +vuint64m2_t test_vcpop_v_u64m2(vuint64m2_t vs2, size_t vl) { + return __riscv_vcpop(vs2, vl); +} + +vuint64m4_t test_vcpop_v_u64m4(vuint64m4_t vs2, size_t vl) { + return __riscv_vcpop(vs2, vl); +} + +vuint64m8_t test_vcpop_v_u64m8(vuint64m8_t vs2, size_t vl) { + return __riscv_vcpop(vs2, vl); +} + +vuint8mf8_t test_vcpop_v_u8mf8_m(vbool64_t vm, vuint8mf8_t vs2, size_t vl) { + return __riscv_vcpop(vm, vs2, vl); +} + +vuint8mf4_t test_vcpop_v_u8mf4_m(vbool32_t vm, vuint8mf4_t vs2, size_t vl) { + return __riscv_vcpop(vm, vs2, vl); +} + +vuint8mf2_t test_vcpop_v_u8mf2_m(vbool16_t vm, vuint8mf2_t vs2, size_t vl) { + return __riscv_vcpop(vm, vs2, vl); +} + +vuint8m1_t test_vcpop_v_u8m1_m(vbool8_t vm, vuint8m1_t vs2, size_t vl) { + return __riscv_vcpop(vm, vs2, vl); +} + +vuint8m2_t test_vcpop_v_u8m2_m(vbool4_t vm, vuint8m2_t vs2, size_t vl) { + return __riscv_vcpop(vm, vs2, vl); +} + +vuint8m4_t test_vcpop_v_u8m4_m(vbool2_t vm, vuint8m4_t vs2, size_t vl) { + return __riscv_vcpop(vm, vs2, vl); +} + +vuint8m8_t test_vcpop_v_u8m8_m(vbool1_t vm, vuint8m8_t vs2, size_t vl) { + return __riscv_vcpop(vm, vs2, vl); +} + +vuint16mf4_t test_vcpop_v_u16mf4_m(vbool64_t vm, vuint16mf4_t vs2, size_t vl) { + return __riscv_vcpop(vm, vs2, vl); +} + +vuint16mf2_t test_vcpop_v_u16mf2_m(vbool32_t vm, vuint16mf2_t vs2, size_t vl) { + return __riscv_vcpop(vm, vs2, vl); +} + +vuint16m1_t test_vcpop_v_u16m1_m(vbool16_t vm, vuint16m1_t vs2, size_t vl) { + return __riscv_vcpop(vm, vs2, vl); +} + +vuint16m2_t test_vcpop_v_u16m2_m(vbool8_t vm, vuint16m2_t vs2, size_t vl) { + return __riscv_vcpop(vm, vs2, vl); +} + +vuint16m4_t test_vcpop_v_u16m4_m(vbool4_t vm, vuint16m4_t vs2, size_t vl) { + return __riscv_vcpop(vm, vs2, vl); +} + +vuint16m8_t test_vcpop_v_u16m8_m(vbool2_t vm, vuint16m8_t vs2, size_t vl) { + return __riscv_vcpop(vm, vs2, vl); +} + +vuint32mf2_t test_vcpop_v_u32mf2_m(vbool64_t vm, vuint32mf2_t vs2, size_t vl) { + return __riscv_vcpop(vm, vs2, vl); +} + +vuint32m1_t test_vcpop_v_u32m1_m(vbool32_t vm, vuint32m1_t vs2, size_t vl) { + return __riscv_vcpop(vm, vs2, vl); +} + +vuint32m2_t test_vcpop_v_u32m2_m(vbool16_t vm, vuint32m2_t vs2, size_t vl) { + return __riscv_vcpop(vm, vs2, vl); +} + +vuint32m4_t test_vcpop_v_u32m4_m(vbool8_t vm, vuint32m4_t vs2, size_t vl) { + return __riscv_vcpop(vm, vs2, vl); +} + +vuint32m8_t test_vcpop_v_u32m8_m(vbool4_t vm, vuint32m8_t vs2, size_t vl) { + return __riscv_vcpop(vm, vs2, vl); +} + +vuint64m1_t test_vcpop_v_u64m1_m(vbool64_t vm, vuint64m1_t vs2, size_t vl) { + return __riscv_vcpop(vm, vs2, vl); +} + +vuint64m2_t test_vcpop_v_u64m2_m(vbool32_t vm, vuint64m2_t vs2, size_t vl) { + return __riscv_vcpop(vm, vs2, vl); +} + +vuint64m4_t test_vcpop_v_u64m4_m(vbool16_t vm, vuint64m4_t vs2, size_t vl) { + return __riscv_vcpop(vm, vs2, vl); +} + +vuint64m8_t test_vcpop_v_u64m8_m(vbool8_t vm, vuint64m8_t vs2, size_t vl) { + return __riscv_vcpop(vm, vs2, vl); +} diff --git 
a/auto-generated/vector-crypto/overloaded-api-testing/vctz.c b/auto-generated/vector-crypto/overloaded-api-testing/vctz.c new file mode 100644 index 000000000..51d6c57e9 --- /dev/null +++ b/auto-generated/vector-crypto/overloaded-api-testing/vctz.c @@ -0,0 +1,178 @@ +#include +#include + +vuint8mf8_t test_vctz_v_u8mf8(vuint8mf8_t vs2, size_t vl) { + return __riscv_vctz(vs2, vl); +} + +vuint8mf4_t test_vctz_v_u8mf4(vuint8mf4_t vs2, size_t vl) { + return __riscv_vctz(vs2, vl); +} + +vuint8mf2_t test_vctz_v_u8mf2(vuint8mf2_t vs2, size_t vl) { + return __riscv_vctz(vs2, vl); +} + +vuint8m1_t test_vctz_v_u8m1(vuint8m1_t vs2, size_t vl) { + return __riscv_vctz(vs2, vl); +} + +vuint8m2_t test_vctz_v_u8m2(vuint8m2_t vs2, size_t vl) { + return __riscv_vctz(vs2, vl); +} + +vuint8m4_t test_vctz_v_u8m4(vuint8m4_t vs2, size_t vl) { + return __riscv_vctz(vs2, vl); +} + +vuint8m8_t test_vctz_v_u8m8(vuint8m8_t vs2, size_t vl) { + return __riscv_vctz(vs2, vl); +} + +vuint16mf4_t test_vctz_v_u16mf4(vuint16mf4_t vs2, size_t vl) { + return __riscv_vctz(vs2, vl); +} + +vuint16mf2_t test_vctz_v_u16mf2(vuint16mf2_t vs2, size_t vl) { + return __riscv_vctz(vs2, vl); +} + +vuint16m1_t test_vctz_v_u16m1(vuint16m1_t vs2, size_t vl) { + return __riscv_vctz(vs2, vl); +} + +vuint16m2_t test_vctz_v_u16m2(vuint16m2_t vs2, size_t vl) { + return __riscv_vctz(vs2, vl); +} + +vuint16m4_t test_vctz_v_u16m4(vuint16m4_t vs2, size_t vl) { + return __riscv_vctz(vs2, vl); +} + +vuint16m8_t test_vctz_v_u16m8(vuint16m8_t vs2, size_t vl) { + return __riscv_vctz(vs2, vl); +} + +vuint32mf2_t test_vctz_v_u32mf2(vuint32mf2_t vs2, size_t vl) { + return __riscv_vctz(vs2, vl); +} + +vuint32m1_t test_vctz_v_u32m1(vuint32m1_t vs2, size_t vl) { + return __riscv_vctz(vs2, vl); +} + +vuint32m2_t test_vctz_v_u32m2(vuint32m2_t vs2, size_t vl) { + return __riscv_vctz(vs2, vl); +} + +vuint32m4_t test_vctz_v_u32m4(vuint32m4_t vs2, size_t vl) { + return __riscv_vctz(vs2, vl); +} + +vuint32m8_t test_vctz_v_u32m8(vuint32m8_t vs2, size_t vl) { + return __riscv_vctz(vs2, vl); +} + +vuint64m1_t test_vctz_v_u64m1(vuint64m1_t vs2, size_t vl) { + return __riscv_vctz(vs2, vl); +} + +vuint64m2_t test_vctz_v_u64m2(vuint64m2_t vs2, size_t vl) { + return __riscv_vctz(vs2, vl); +} + +vuint64m4_t test_vctz_v_u64m4(vuint64m4_t vs2, size_t vl) { + return __riscv_vctz(vs2, vl); +} + +vuint64m8_t test_vctz_v_u64m8(vuint64m8_t vs2, size_t vl) { + return __riscv_vctz(vs2, vl); +} + +vuint8mf8_t test_vctz_v_u8mf8_m(vbool64_t vm, vuint8mf8_t vs2, size_t vl) { + return __riscv_vctz(vm, vs2, vl); +} + +vuint8mf4_t test_vctz_v_u8mf4_m(vbool32_t vm, vuint8mf4_t vs2, size_t vl) { + return __riscv_vctz(vm, vs2, vl); +} + +vuint8mf2_t test_vctz_v_u8mf2_m(vbool16_t vm, vuint8mf2_t vs2, size_t vl) { + return __riscv_vctz(vm, vs2, vl); +} + +vuint8m1_t test_vctz_v_u8m1_m(vbool8_t vm, vuint8m1_t vs2, size_t vl) { + return __riscv_vctz(vm, vs2, vl); +} + +vuint8m2_t test_vctz_v_u8m2_m(vbool4_t vm, vuint8m2_t vs2, size_t vl) { + return __riscv_vctz(vm, vs2, vl); +} + +vuint8m4_t test_vctz_v_u8m4_m(vbool2_t vm, vuint8m4_t vs2, size_t vl) { + return __riscv_vctz(vm, vs2, vl); +} + +vuint8m8_t test_vctz_v_u8m8_m(vbool1_t vm, vuint8m8_t vs2, size_t vl) { + return __riscv_vctz(vm, vs2, vl); +} + +vuint16mf4_t test_vctz_v_u16mf4_m(vbool64_t vm, vuint16mf4_t vs2, size_t vl) { + return __riscv_vctz(vm, vs2, vl); +} + +vuint16mf2_t test_vctz_v_u16mf2_m(vbool32_t vm, vuint16mf2_t vs2, size_t vl) { + return __riscv_vctz(vm, vs2, vl); +} + +vuint16m1_t test_vctz_v_u16m1_m(vbool16_t vm, vuint16m1_t 
vs2, size_t vl) { + return __riscv_vctz(vm, vs2, vl); +} + +vuint16m2_t test_vctz_v_u16m2_m(vbool8_t vm, vuint16m2_t vs2, size_t vl) { + return __riscv_vctz(vm, vs2, vl); +} + +vuint16m4_t test_vctz_v_u16m4_m(vbool4_t vm, vuint16m4_t vs2, size_t vl) { + return __riscv_vctz(vm, vs2, vl); +} + +vuint16m8_t test_vctz_v_u16m8_m(vbool2_t vm, vuint16m8_t vs2, size_t vl) { + return __riscv_vctz(vm, vs2, vl); +} + +vuint32mf2_t test_vctz_v_u32mf2_m(vbool64_t vm, vuint32mf2_t vs2, size_t vl) { + return __riscv_vctz(vm, vs2, vl); +} + +vuint32m1_t test_vctz_v_u32m1_m(vbool32_t vm, vuint32m1_t vs2, size_t vl) { + return __riscv_vctz(vm, vs2, vl); +} + +vuint32m2_t test_vctz_v_u32m2_m(vbool16_t vm, vuint32m2_t vs2, size_t vl) { + return __riscv_vctz(vm, vs2, vl); +} + +vuint32m4_t test_vctz_v_u32m4_m(vbool8_t vm, vuint32m4_t vs2, size_t vl) { + return __riscv_vctz(vm, vs2, vl); +} + +vuint32m8_t test_vctz_v_u32m8_m(vbool4_t vm, vuint32m8_t vs2, size_t vl) { + return __riscv_vctz(vm, vs2, vl); +} + +vuint64m1_t test_vctz_v_u64m1_m(vbool64_t vm, vuint64m1_t vs2, size_t vl) { + return __riscv_vctz(vm, vs2, vl); +} + +vuint64m2_t test_vctz_v_u64m2_m(vbool32_t vm, vuint64m2_t vs2, size_t vl) { + return __riscv_vctz(vm, vs2, vl); +} + +vuint64m4_t test_vctz_v_u64m4_m(vbool16_t vm, vuint64m4_t vs2, size_t vl) { + return __riscv_vctz(vm, vs2, vl); +} + +vuint64m8_t test_vctz_v_u64m8_m(vbool8_t vm, vuint64m8_t vs2, size_t vl) { + return __riscv_vctz(vm, vs2, vl); +} diff --git a/auto-generated/vector-crypto/overloaded-api-testing/vghsh.c b/auto-generated/vector-crypto/overloaded-api-testing/vghsh.c new file mode 100644 index 000000000..055ce6727 --- /dev/null +++ b/auto-generated/vector-crypto/overloaded-api-testing/vghsh.c @@ -0,0 +1,22 @@ +#include +#include + +vuint32mf2_t test_vghsh_vv_u32mf2(vuint32mf2_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { + return __riscv_vghsh(vd, vs2, vs1, vl); +} + +vuint32m1_t test_vghsh_vv_u32m1(vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vghsh(vd, vs2, vs1, vl); +} + +vuint32m2_t test_vghsh_vv_u32m2(vuint32m2_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { + return __riscv_vghsh(vd, vs2, vs1, vl); +} + +vuint32m4_t test_vghsh_vv_u32m4(vuint32m4_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { + return __riscv_vghsh(vd, vs2, vs1, vl); +} + +vuint32m8_t test_vghsh_vv_u32m8(vuint32m8_t vd, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) { + return __riscv_vghsh(vd, vs2, vs1, vl); +} diff --git a/auto-generated/vector-crypto/overloaded-api-testing/vgmul.c b/auto-generated/vector-crypto/overloaded-api-testing/vgmul.c new file mode 100644 index 000000000..4067ca01b --- /dev/null +++ b/auto-generated/vector-crypto/overloaded-api-testing/vgmul.c @@ -0,0 +1,22 @@ +#include +#include + +vuint32mf2_t test_vgmul_vv_u32mf2(vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) { + return __riscv_vgmul(vd, vs2, vl); +} + +vuint32m1_t test_vgmul_vv_u32m1(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vgmul(vd, vs2, vl); +} + +vuint32m2_t test_vgmul_vv_u32m2(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vgmul(vd, vs2, vl); +} + +vuint32m4_t test_vgmul_vv_u32m4(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vgmul(vd, vs2, vl); +} + +vuint32m8_t test_vgmul_vv_u32m8(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { + return __riscv_vgmul(vd, vs2, vl); +} diff --git a/auto-generated/vector-crypto/overloaded-api-testing/vrev8.c b/auto-generated/vector-crypto/overloaded-api-testing/vrev8.c new file mode 100644 
index 000000000..3391569f2 --- /dev/null +++ b/auto-generated/vector-crypto/overloaded-api-testing/vrev8.c @@ -0,0 +1,178 @@ +#include +#include + +vuint8mf8_t test_vrev8_v_u8mf8(vuint8mf8_t vs2, size_t vl) { + return __riscv_vrev8(vs2, vl); +} + +vuint8mf4_t test_vrev8_v_u8mf4(vuint8mf4_t vs2, size_t vl) { + return __riscv_vrev8(vs2, vl); +} + +vuint8mf2_t test_vrev8_v_u8mf2(vuint8mf2_t vs2, size_t vl) { + return __riscv_vrev8(vs2, vl); +} + +vuint8m1_t test_vrev8_v_u8m1(vuint8m1_t vs2, size_t vl) { + return __riscv_vrev8(vs2, vl); +} + +vuint8m2_t test_vrev8_v_u8m2(vuint8m2_t vs2, size_t vl) { + return __riscv_vrev8(vs2, vl); +} + +vuint8m4_t test_vrev8_v_u8m4(vuint8m4_t vs2, size_t vl) { + return __riscv_vrev8(vs2, vl); +} + +vuint8m8_t test_vrev8_v_u8m8(vuint8m8_t vs2, size_t vl) { + return __riscv_vrev8(vs2, vl); +} + +vuint16mf4_t test_vrev8_v_u16mf4(vuint16mf4_t vs2, size_t vl) { + return __riscv_vrev8(vs2, vl); +} + +vuint16mf2_t test_vrev8_v_u16mf2(vuint16mf2_t vs2, size_t vl) { + return __riscv_vrev8(vs2, vl); +} + +vuint16m1_t test_vrev8_v_u16m1(vuint16m1_t vs2, size_t vl) { + return __riscv_vrev8(vs2, vl); +} + +vuint16m2_t test_vrev8_v_u16m2(vuint16m2_t vs2, size_t vl) { + return __riscv_vrev8(vs2, vl); +} + +vuint16m4_t test_vrev8_v_u16m4(vuint16m4_t vs2, size_t vl) { + return __riscv_vrev8(vs2, vl); +} + +vuint16m8_t test_vrev8_v_u16m8(vuint16m8_t vs2, size_t vl) { + return __riscv_vrev8(vs2, vl); +} + +vuint32mf2_t test_vrev8_v_u32mf2(vuint32mf2_t vs2, size_t vl) { + return __riscv_vrev8(vs2, vl); +} + +vuint32m1_t test_vrev8_v_u32m1(vuint32m1_t vs2, size_t vl) { + return __riscv_vrev8(vs2, vl); +} + +vuint32m2_t test_vrev8_v_u32m2(vuint32m2_t vs2, size_t vl) { + return __riscv_vrev8(vs2, vl); +} + +vuint32m4_t test_vrev8_v_u32m4(vuint32m4_t vs2, size_t vl) { + return __riscv_vrev8(vs2, vl); +} + +vuint32m8_t test_vrev8_v_u32m8(vuint32m8_t vs2, size_t vl) { + return __riscv_vrev8(vs2, vl); +} + +vuint64m1_t test_vrev8_v_u64m1(vuint64m1_t vs2, size_t vl) { + return __riscv_vrev8(vs2, vl); +} + +vuint64m2_t test_vrev8_v_u64m2(vuint64m2_t vs2, size_t vl) { + return __riscv_vrev8(vs2, vl); +} + +vuint64m4_t test_vrev8_v_u64m4(vuint64m4_t vs2, size_t vl) { + return __riscv_vrev8(vs2, vl); +} + +vuint64m8_t test_vrev8_v_u64m8(vuint64m8_t vs2, size_t vl) { + return __riscv_vrev8(vs2, vl); +} + +vuint8mf8_t test_vrev8_v_u8mf8_m(vbool64_t vm, vuint8mf8_t vs2, size_t vl) { + return __riscv_vrev8(vm, vs2, vl); +} + +vuint8mf4_t test_vrev8_v_u8mf4_m(vbool32_t vm, vuint8mf4_t vs2, size_t vl) { + return __riscv_vrev8(vm, vs2, vl); +} + +vuint8mf2_t test_vrev8_v_u8mf2_m(vbool16_t vm, vuint8mf2_t vs2, size_t vl) { + return __riscv_vrev8(vm, vs2, vl); +} + +vuint8m1_t test_vrev8_v_u8m1_m(vbool8_t vm, vuint8m1_t vs2, size_t vl) { + return __riscv_vrev8(vm, vs2, vl); +} + +vuint8m2_t test_vrev8_v_u8m2_m(vbool4_t vm, vuint8m2_t vs2, size_t vl) { + return __riscv_vrev8(vm, vs2, vl); +} + +vuint8m4_t test_vrev8_v_u8m4_m(vbool2_t vm, vuint8m4_t vs2, size_t vl) { + return __riscv_vrev8(vm, vs2, vl); +} + +vuint8m8_t test_vrev8_v_u8m8_m(vbool1_t vm, vuint8m8_t vs2, size_t vl) { + return __riscv_vrev8(vm, vs2, vl); +} + +vuint16mf4_t test_vrev8_v_u16mf4_m(vbool64_t vm, vuint16mf4_t vs2, size_t vl) { + return __riscv_vrev8(vm, vs2, vl); +} + +vuint16mf2_t test_vrev8_v_u16mf2_m(vbool32_t vm, vuint16mf2_t vs2, size_t vl) { + return __riscv_vrev8(vm, vs2, vl); +} + +vuint16m1_t test_vrev8_v_u16m1_m(vbool16_t vm, vuint16m1_t vs2, size_t vl) { + return __riscv_vrev8(vm, vs2, vl); +} + +vuint16m2_t 
test_vrev8_v_u16m2_m(vbool8_t vm, vuint16m2_t vs2, size_t vl) { + return __riscv_vrev8(vm, vs2, vl); +} + +vuint16m4_t test_vrev8_v_u16m4_m(vbool4_t vm, vuint16m4_t vs2, size_t vl) { + return __riscv_vrev8(vm, vs2, vl); +} + +vuint16m8_t test_vrev8_v_u16m8_m(vbool2_t vm, vuint16m8_t vs2, size_t vl) { + return __riscv_vrev8(vm, vs2, vl); +} + +vuint32mf2_t test_vrev8_v_u32mf2_m(vbool64_t vm, vuint32mf2_t vs2, size_t vl) { + return __riscv_vrev8(vm, vs2, vl); +} + +vuint32m1_t test_vrev8_v_u32m1_m(vbool32_t vm, vuint32m1_t vs2, size_t vl) { + return __riscv_vrev8(vm, vs2, vl); +} + +vuint32m2_t test_vrev8_v_u32m2_m(vbool16_t vm, vuint32m2_t vs2, size_t vl) { + return __riscv_vrev8(vm, vs2, vl); +} + +vuint32m4_t test_vrev8_v_u32m4_m(vbool8_t vm, vuint32m4_t vs2, size_t vl) { + return __riscv_vrev8(vm, vs2, vl); +} + +vuint32m8_t test_vrev8_v_u32m8_m(vbool4_t vm, vuint32m8_t vs2, size_t vl) { + return __riscv_vrev8(vm, vs2, vl); +} + +vuint64m1_t test_vrev8_v_u64m1_m(vbool64_t vm, vuint64m1_t vs2, size_t vl) { + return __riscv_vrev8(vm, vs2, vl); +} + +vuint64m2_t test_vrev8_v_u64m2_m(vbool32_t vm, vuint64m2_t vs2, size_t vl) { + return __riscv_vrev8(vm, vs2, vl); +} + +vuint64m4_t test_vrev8_v_u64m4_m(vbool16_t vm, vuint64m4_t vs2, size_t vl) { + return __riscv_vrev8(vm, vs2, vl); +} + +vuint64m8_t test_vrev8_v_u64m8_m(vbool8_t vm, vuint64m8_t vs2, size_t vl) { + return __riscv_vrev8(vm, vs2, vl); +} diff --git a/auto-generated/vector-crypto/overloaded-api-testing/vrol.c b/auto-generated/vector-crypto/overloaded-api-testing/vrol.c new file mode 100644 index 000000000..a1900207c --- /dev/null +++ b/auto-generated/vector-crypto/overloaded-api-testing/vrol.c @@ -0,0 +1,354 @@ +#include +#include + +vuint8mf8_t test_vrol_vv_u8mf8(vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { + return __riscv_vrol(vs2, vs1, vl); +} + +vuint8mf8_t test_vrol_vx_u8mf8(vuint8mf8_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol(vs2, rs1, vl); +} + +vuint8mf4_t test_vrol_vv_u8mf4(vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { + return __riscv_vrol(vs2, vs1, vl); +} + +vuint8mf4_t test_vrol_vx_u8mf4(vuint8mf4_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol(vs2, rs1, vl); +} + +vuint8mf2_t test_vrol_vv_u8mf2(vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { + return __riscv_vrol(vs2, vs1, vl); +} + +vuint8mf2_t test_vrol_vx_u8mf2(vuint8mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol(vs2, rs1, vl); +} + +vuint8m1_t test_vrol_vv_u8m1(vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vrol(vs2, vs1, vl); +} + +vuint8m1_t test_vrol_vx_u8m1(vuint8m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol(vs2, rs1, vl); +} + +vuint8m2_t test_vrol_vv_u8m2(vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) { + return __riscv_vrol(vs2, vs1, vl); +} + +vuint8m2_t test_vrol_vx_u8m2(vuint8m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol(vs2, rs1, vl); +} + +vuint8m4_t test_vrol_vv_u8m4(vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) { + return __riscv_vrol(vs2, vs1, vl); +} + +vuint8m4_t test_vrol_vx_u8m4(vuint8m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol(vs2, rs1, vl); +} + +vuint8m8_t test_vrol_vv_u8m8(vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) { + return __riscv_vrol(vs2, vs1, vl); +} + +vuint8m8_t test_vrol_vx_u8m8(vuint8m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol(vs2, rs1, vl); +} + +vuint16mf4_t test_vrol_vv_u16mf4(vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { + return __riscv_vrol(vs2, vs1, vl); +} + +vuint16mf4_t test_vrol_vx_u16mf4(vuint16mf4_t vs2, size_t rs1, size_t vl) { 
+ return __riscv_vrol(vs2, rs1, vl); +} + +vuint16mf2_t test_vrol_vv_u16mf2(vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { + return __riscv_vrol(vs2, vs1, vl); +} + +vuint16mf2_t test_vrol_vx_u16mf2(vuint16mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol(vs2, rs1, vl); +} + +vuint16m1_t test_vrol_vv_u16m1(vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vrol(vs2, vs1, vl); +} + +vuint16m1_t test_vrol_vx_u16m1(vuint16m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol(vs2, rs1, vl); +} + +vuint16m2_t test_vrol_vv_u16m2(vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { + return __riscv_vrol(vs2, vs1, vl); +} + +vuint16m2_t test_vrol_vx_u16m2(vuint16m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol(vs2, rs1, vl); +} + +vuint16m4_t test_vrol_vv_u16m4(vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { + return __riscv_vrol(vs2, vs1, vl); +} + +vuint16m4_t test_vrol_vx_u16m4(vuint16m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol(vs2, rs1, vl); +} + +vuint16m8_t test_vrol_vv_u16m8(vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) { + return __riscv_vrol(vs2, vs1, vl); +} + +vuint16m8_t test_vrol_vx_u16m8(vuint16m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol(vs2, rs1, vl); +} + +vuint32mf2_t test_vrol_vv_u32mf2(vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { + return __riscv_vrol(vs2, vs1, vl); +} + +vuint32mf2_t test_vrol_vx_u32mf2(vuint32mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol(vs2, rs1, vl); +} + +vuint32m1_t test_vrol_vv_u32m1(vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vrol(vs2, vs1, vl); +} + +vuint32m1_t test_vrol_vx_u32m1(vuint32m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol(vs2, rs1, vl); +} + +vuint32m2_t test_vrol_vv_u32m2(vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { + return __riscv_vrol(vs2, vs1, vl); +} + +vuint32m2_t test_vrol_vx_u32m2(vuint32m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol(vs2, rs1, vl); +} + +vuint32m4_t test_vrol_vv_u32m4(vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { + return __riscv_vrol(vs2, vs1, vl); +} + +vuint32m4_t test_vrol_vx_u32m4(vuint32m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol(vs2, rs1, vl); +} + +vuint32m8_t test_vrol_vv_u32m8(vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) { + return __riscv_vrol(vs2, vs1, vl); +} + +vuint32m8_t test_vrol_vx_u32m8(vuint32m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol(vs2, rs1, vl); +} + +vuint64m1_t test_vrol_vv_u64m1(vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vrol(vs2, vs1, vl); +} + +vuint64m1_t test_vrol_vx_u64m1(vuint64m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol(vs2, rs1, vl); +} + +vuint64m2_t test_vrol_vv_u64m2(vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { + return __riscv_vrol(vs2, vs1, vl); +} + +vuint64m2_t test_vrol_vx_u64m2(vuint64m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol(vs2, rs1, vl); +} + +vuint64m4_t test_vrol_vv_u64m4(vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { + return __riscv_vrol(vs2, vs1, vl); +} + +vuint64m4_t test_vrol_vx_u64m4(vuint64m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol(vs2, rs1, vl); +} + +vuint64m8_t test_vrol_vv_u64m8(vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { + return __riscv_vrol(vs2, vs1, vl); +} + +vuint64m8_t test_vrol_vx_u64m8(vuint64m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol(vs2, rs1, vl); +} + +vuint8mf8_t test_vrol_vv_u8mf8_m(vbool64_t vm, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { + return __riscv_vrol(vm, vs2, vs1, vl); +} + +vuint8mf8_t 
test_vrol_vx_u8mf8_m(vbool64_t vm, vuint8mf8_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol(vm, vs2, rs1, vl); +} + +vuint8mf4_t test_vrol_vv_u8mf4_m(vbool32_t vm, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { + return __riscv_vrol(vm, vs2, vs1, vl); +} + +vuint8mf4_t test_vrol_vx_u8mf4_m(vbool32_t vm, vuint8mf4_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol(vm, vs2, rs1, vl); +} + +vuint8mf2_t test_vrol_vv_u8mf2_m(vbool16_t vm, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { + return __riscv_vrol(vm, vs2, vs1, vl); +} + +vuint8mf2_t test_vrol_vx_u8mf2_m(vbool16_t vm, vuint8mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol(vm, vs2, rs1, vl); +} + +vuint8m1_t test_vrol_vv_u8m1_m(vbool8_t vm, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vrol(vm, vs2, vs1, vl); +} + +vuint8m1_t test_vrol_vx_u8m1_m(vbool8_t vm, vuint8m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol(vm, vs2, rs1, vl); +} + +vuint8m2_t test_vrol_vv_u8m2_m(vbool4_t vm, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) { + return __riscv_vrol(vm, vs2, vs1, vl); +} + +vuint8m2_t test_vrol_vx_u8m2_m(vbool4_t vm, vuint8m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol(vm, vs2, rs1, vl); +} + +vuint8m4_t test_vrol_vv_u8m4_m(vbool2_t vm, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) { + return __riscv_vrol(vm, vs2, vs1, vl); +} + +vuint8m4_t test_vrol_vx_u8m4_m(vbool2_t vm, vuint8m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol(vm, vs2, rs1, vl); +} + +vuint8m8_t test_vrol_vv_u8m8_m(vbool1_t vm, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) { + return __riscv_vrol(vm, vs2, vs1, vl); +} + +vuint8m8_t test_vrol_vx_u8m8_m(vbool1_t vm, vuint8m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol(vm, vs2, rs1, vl); +} + +vuint16mf4_t test_vrol_vv_u16mf4_m(vbool64_t vm, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { + return __riscv_vrol(vm, vs2, vs1, vl); +} + +vuint16mf4_t test_vrol_vx_u16mf4_m(vbool64_t vm, vuint16mf4_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol(vm, vs2, rs1, vl); +} + +vuint16mf2_t test_vrol_vv_u16mf2_m(vbool32_t vm, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { + return __riscv_vrol(vm, vs2, vs1, vl); +} + +vuint16mf2_t test_vrol_vx_u16mf2_m(vbool32_t vm, vuint16mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol(vm, vs2, rs1, vl); +} + +vuint16m1_t test_vrol_vv_u16m1_m(vbool16_t vm, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vrol(vm, vs2, vs1, vl); +} + +vuint16m1_t test_vrol_vx_u16m1_m(vbool16_t vm, vuint16m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol(vm, vs2, rs1, vl); +} + +vuint16m2_t test_vrol_vv_u16m2_m(vbool8_t vm, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { + return __riscv_vrol(vm, vs2, vs1, vl); +} + +vuint16m2_t test_vrol_vx_u16m2_m(vbool8_t vm, vuint16m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol(vm, vs2, rs1, vl); +} + +vuint16m4_t test_vrol_vv_u16m4_m(vbool4_t vm, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { + return __riscv_vrol(vm, vs2, vs1, vl); +} + +vuint16m4_t test_vrol_vx_u16m4_m(vbool4_t vm, vuint16m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol(vm, vs2, rs1, vl); +} + +vuint16m8_t test_vrol_vv_u16m8_m(vbool2_t vm, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) { + return __riscv_vrol(vm, vs2, vs1, vl); +} + +vuint16m8_t test_vrol_vx_u16m8_m(vbool2_t vm, vuint16m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol(vm, vs2, rs1, vl); +} + +vuint32mf2_t test_vrol_vv_u32mf2_m(vbool64_t vm, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { + return __riscv_vrol(vm, vs2, vs1, 
vl); +} + +vuint32mf2_t test_vrol_vx_u32mf2_m(vbool64_t vm, vuint32mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol(vm, vs2, rs1, vl); +} + +vuint32m1_t test_vrol_vv_u32m1_m(vbool32_t vm, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vrol(vm, vs2, vs1, vl); +} + +vuint32m1_t test_vrol_vx_u32m1_m(vbool32_t vm, vuint32m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol(vm, vs2, rs1, vl); +} + +vuint32m2_t test_vrol_vv_u32m2_m(vbool16_t vm, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { + return __riscv_vrol(vm, vs2, vs1, vl); +} + +vuint32m2_t test_vrol_vx_u32m2_m(vbool16_t vm, vuint32m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol(vm, vs2, rs1, vl); +} + +vuint32m4_t test_vrol_vv_u32m4_m(vbool8_t vm, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { + return __riscv_vrol(vm, vs2, vs1, vl); +} + +vuint32m4_t test_vrol_vx_u32m4_m(vbool8_t vm, vuint32m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol(vm, vs2, rs1, vl); +} + +vuint32m8_t test_vrol_vv_u32m8_m(vbool4_t vm, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) { + return __riscv_vrol(vm, vs2, vs1, vl); +} + +vuint32m8_t test_vrol_vx_u32m8_m(vbool4_t vm, vuint32m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol(vm, vs2, rs1, vl); +} + +vuint64m1_t test_vrol_vv_u64m1_m(vbool64_t vm, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vrol(vm, vs2, vs1, vl); +} + +vuint64m1_t test_vrol_vx_u64m1_m(vbool64_t vm, vuint64m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol(vm, vs2, rs1, vl); +} + +vuint64m2_t test_vrol_vv_u64m2_m(vbool32_t vm, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { + return __riscv_vrol(vm, vs2, vs1, vl); +} + +vuint64m2_t test_vrol_vx_u64m2_m(vbool32_t vm, vuint64m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol(vm, vs2, rs1, vl); +} + +vuint64m4_t test_vrol_vv_u64m4_m(vbool16_t vm, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { + return __riscv_vrol(vm, vs2, vs1, vl); +} + +vuint64m4_t test_vrol_vx_u64m4_m(vbool16_t vm, vuint64m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol(vm, vs2, rs1, vl); +} + +vuint64m8_t test_vrol_vv_u64m8_m(vbool8_t vm, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { + return __riscv_vrol(vm, vs2, vs1, vl); +} + +vuint64m8_t test_vrol_vx_u64m8_m(vbool8_t vm, vuint64m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol(vm, vs2, rs1, vl); +} diff --git a/auto-generated/vector-crypto/overloaded-api-testing/vror.c b/auto-generated/vector-crypto/overloaded-api-testing/vror.c new file mode 100644 index 000000000..e87ad43c8 --- /dev/null +++ b/auto-generated/vector-crypto/overloaded-api-testing/vror.c @@ -0,0 +1,354 @@ +#include +#include + +vuint8mf8_t test_vror_vv_u8mf8(vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { + return __riscv_vror(vs2, vs1, vl); +} + +vuint8mf8_t test_vror_vx_u8mf8(vuint8mf8_t vs2, size_t rs1, size_t vl) { + return __riscv_vror(vs2, rs1, vl); +} + +vuint8mf4_t test_vror_vv_u8mf4(vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { + return __riscv_vror(vs2, vs1, vl); +} + +vuint8mf4_t test_vror_vx_u8mf4(vuint8mf4_t vs2, size_t rs1, size_t vl) { + return __riscv_vror(vs2, rs1, vl); +} + +vuint8mf2_t test_vror_vv_u8mf2(vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { + return __riscv_vror(vs2, vs1, vl); +} + +vuint8mf2_t test_vror_vx_u8mf2(vuint8mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vror(vs2, rs1, vl); +} + +vuint8m1_t test_vror_vv_u8m1(vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vror(vs2, vs1, vl); +} + +vuint8m1_t test_vror_vx_u8m1(vuint8m1_t vs2, size_t rs1, size_t vl) 
{ + return __riscv_vror(vs2, rs1, vl); +} + +vuint8m2_t test_vror_vv_u8m2(vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) { + return __riscv_vror(vs2, vs1, vl); +} + +vuint8m2_t test_vror_vx_u8m2(vuint8m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vror(vs2, rs1, vl); +} + +vuint8m4_t test_vror_vv_u8m4(vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) { + return __riscv_vror(vs2, vs1, vl); +} + +vuint8m4_t test_vror_vx_u8m4(vuint8m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vror(vs2, rs1, vl); +} + +vuint8m8_t test_vror_vv_u8m8(vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) { + return __riscv_vror(vs2, vs1, vl); +} + +vuint8m8_t test_vror_vx_u8m8(vuint8m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vror(vs2, rs1, vl); +} + +vuint16mf4_t test_vror_vv_u16mf4(vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { + return __riscv_vror(vs2, vs1, vl); +} + +vuint16mf4_t test_vror_vx_u16mf4(vuint16mf4_t vs2, size_t rs1, size_t vl) { + return __riscv_vror(vs2, rs1, vl); +} + +vuint16mf2_t test_vror_vv_u16mf2(vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { + return __riscv_vror(vs2, vs1, vl); +} + +vuint16mf2_t test_vror_vx_u16mf2(vuint16mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vror(vs2, rs1, vl); +} + +vuint16m1_t test_vror_vv_u16m1(vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vror(vs2, vs1, vl); +} + +vuint16m1_t test_vror_vx_u16m1(vuint16m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vror(vs2, rs1, vl); +} + +vuint16m2_t test_vror_vv_u16m2(vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { + return __riscv_vror(vs2, vs1, vl); +} + +vuint16m2_t test_vror_vx_u16m2(vuint16m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vror(vs2, rs1, vl); +} + +vuint16m4_t test_vror_vv_u16m4(vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { + return __riscv_vror(vs2, vs1, vl); +} + +vuint16m4_t test_vror_vx_u16m4(vuint16m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vror(vs2, rs1, vl); +} + +vuint16m8_t test_vror_vv_u16m8(vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) { + return __riscv_vror(vs2, vs1, vl); +} + +vuint16m8_t test_vror_vx_u16m8(vuint16m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vror(vs2, rs1, vl); +} + +vuint32mf2_t test_vror_vv_u32mf2(vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { + return __riscv_vror(vs2, vs1, vl); +} + +vuint32mf2_t test_vror_vx_u32mf2(vuint32mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vror(vs2, rs1, vl); +} + +vuint32m1_t test_vror_vv_u32m1(vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vror(vs2, vs1, vl); +} + +vuint32m1_t test_vror_vx_u32m1(vuint32m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vror(vs2, rs1, vl); +} + +vuint32m2_t test_vror_vv_u32m2(vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { + return __riscv_vror(vs2, vs1, vl); +} + +vuint32m2_t test_vror_vx_u32m2(vuint32m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vror(vs2, rs1, vl); +} + +vuint32m4_t test_vror_vv_u32m4(vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { + return __riscv_vror(vs2, vs1, vl); +} + +vuint32m4_t test_vror_vx_u32m4(vuint32m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vror(vs2, rs1, vl); +} + +vuint32m8_t test_vror_vv_u32m8(vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) { + return __riscv_vror(vs2, vs1, vl); +} + +vuint32m8_t test_vror_vx_u32m8(vuint32m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vror(vs2, rs1, vl); +} + +vuint64m1_t test_vror_vv_u64m1(vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vror(vs2, vs1, vl); +} + +vuint64m1_t test_vror_vx_u64m1(vuint64m1_t vs2, size_t rs1, size_t vl) 
{ + return __riscv_vror(vs2, rs1, vl); +} + +vuint64m2_t test_vror_vv_u64m2(vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { + return __riscv_vror(vs2, vs1, vl); +} + +vuint64m2_t test_vror_vx_u64m2(vuint64m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vror(vs2, rs1, vl); +} + +vuint64m4_t test_vror_vv_u64m4(vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { + return __riscv_vror(vs2, vs1, vl); +} + +vuint64m4_t test_vror_vx_u64m4(vuint64m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vror(vs2, rs1, vl); +} + +vuint64m8_t test_vror_vv_u64m8(vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { + return __riscv_vror(vs2, vs1, vl); +} + +vuint64m8_t test_vror_vx_u64m8(vuint64m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vror(vs2, rs1, vl); +} + +vuint8mf8_t test_vror_vv_u8mf8_m(vbool64_t vm, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { + return __riscv_vror(vm, vs2, vs1, vl); +} + +vuint8mf8_t test_vror_vx_u8mf8_m(vbool64_t vm, vuint8mf8_t vs2, size_t rs1, size_t vl) { + return __riscv_vror(vm, vs2, rs1, vl); +} + +vuint8mf4_t test_vror_vv_u8mf4_m(vbool32_t vm, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { + return __riscv_vror(vm, vs2, vs1, vl); +} + +vuint8mf4_t test_vror_vx_u8mf4_m(vbool32_t vm, vuint8mf4_t vs2, size_t rs1, size_t vl) { + return __riscv_vror(vm, vs2, rs1, vl); +} + +vuint8mf2_t test_vror_vv_u8mf2_m(vbool16_t vm, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { + return __riscv_vror(vm, vs2, vs1, vl); +} + +vuint8mf2_t test_vror_vx_u8mf2_m(vbool16_t vm, vuint8mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vror(vm, vs2, rs1, vl); +} + +vuint8m1_t test_vror_vv_u8m1_m(vbool8_t vm, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vror(vm, vs2, vs1, vl); +} + +vuint8m1_t test_vror_vx_u8m1_m(vbool8_t vm, vuint8m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vror(vm, vs2, rs1, vl); +} + +vuint8m2_t test_vror_vv_u8m2_m(vbool4_t vm, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) { + return __riscv_vror(vm, vs2, vs1, vl); +} + +vuint8m2_t test_vror_vx_u8m2_m(vbool4_t vm, vuint8m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vror(vm, vs2, rs1, vl); +} + +vuint8m4_t test_vror_vv_u8m4_m(vbool2_t vm, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) { + return __riscv_vror(vm, vs2, vs1, vl); +} + +vuint8m4_t test_vror_vx_u8m4_m(vbool2_t vm, vuint8m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vror(vm, vs2, rs1, vl); +} + +vuint8m8_t test_vror_vv_u8m8_m(vbool1_t vm, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) { + return __riscv_vror(vm, vs2, vs1, vl); +} + +vuint8m8_t test_vror_vx_u8m8_m(vbool1_t vm, vuint8m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vror(vm, vs2, rs1, vl); +} + +vuint16mf4_t test_vror_vv_u16mf4_m(vbool64_t vm, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { + return __riscv_vror(vm, vs2, vs1, vl); +} + +vuint16mf4_t test_vror_vx_u16mf4_m(vbool64_t vm, vuint16mf4_t vs2, size_t rs1, size_t vl) { + return __riscv_vror(vm, vs2, rs1, vl); +} + +vuint16mf2_t test_vror_vv_u16mf2_m(vbool32_t vm, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { + return __riscv_vror(vm, vs2, vs1, vl); +} + +vuint16mf2_t test_vror_vx_u16mf2_m(vbool32_t vm, vuint16mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vror(vm, vs2, rs1, vl); +} + +vuint16m1_t test_vror_vv_u16m1_m(vbool16_t vm, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vror(vm, vs2, vs1, vl); +} + +vuint16m1_t test_vror_vx_u16m1_m(vbool16_t vm, vuint16m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vror(vm, vs2, rs1, vl); +} + +vuint16m2_t test_vror_vv_u16m2_m(vbool8_t vm, 
vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { + return __riscv_vror(vm, vs2, vs1, vl); +} + +vuint16m2_t test_vror_vx_u16m2_m(vbool8_t vm, vuint16m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vror(vm, vs2, rs1, vl); +} + +vuint16m4_t test_vror_vv_u16m4_m(vbool4_t vm, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { + return __riscv_vror(vm, vs2, vs1, vl); +} + +vuint16m4_t test_vror_vx_u16m4_m(vbool4_t vm, vuint16m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vror(vm, vs2, rs1, vl); +} + +vuint16m8_t test_vror_vv_u16m8_m(vbool2_t vm, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) { + return __riscv_vror(vm, vs2, vs1, vl); +} + +vuint16m8_t test_vror_vx_u16m8_m(vbool2_t vm, vuint16m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vror(vm, vs2, rs1, vl); +} + +vuint32mf2_t test_vror_vv_u32mf2_m(vbool64_t vm, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { + return __riscv_vror(vm, vs2, vs1, vl); +} + +vuint32mf2_t test_vror_vx_u32mf2_m(vbool64_t vm, vuint32mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vror(vm, vs2, rs1, vl); +} + +vuint32m1_t test_vror_vv_u32m1_m(vbool32_t vm, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vror(vm, vs2, vs1, vl); +} + +vuint32m1_t test_vror_vx_u32m1_m(vbool32_t vm, vuint32m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vror(vm, vs2, rs1, vl); +} + +vuint32m2_t test_vror_vv_u32m2_m(vbool16_t vm, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { + return __riscv_vror(vm, vs2, vs1, vl); +} + +vuint32m2_t test_vror_vx_u32m2_m(vbool16_t vm, vuint32m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vror(vm, vs2, rs1, vl); +} + +vuint32m4_t test_vror_vv_u32m4_m(vbool8_t vm, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { + return __riscv_vror(vm, vs2, vs1, vl); +} + +vuint32m4_t test_vror_vx_u32m4_m(vbool8_t vm, vuint32m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vror(vm, vs2, rs1, vl); +} + +vuint32m8_t test_vror_vv_u32m8_m(vbool4_t vm, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) { + return __riscv_vror(vm, vs2, vs1, vl); +} + +vuint32m8_t test_vror_vx_u32m8_m(vbool4_t vm, vuint32m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vror(vm, vs2, rs1, vl); +} + +vuint64m1_t test_vror_vv_u64m1_m(vbool64_t vm, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vror(vm, vs2, vs1, vl); +} + +vuint64m1_t test_vror_vx_u64m1_m(vbool64_t vm, vuint64m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vror(vm, vs2, rs1, vl); +} + +vuint64m2_t test_vror_vv_u64m2_m(vbool32_t vm, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { + return __riscv_vror(vm, vs2, vs1, vl); +} + +vuint64m2_t test_vror_vx_u64m2_m(vbool32_t vm, vuint64m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vror(vm, vs2, rs1, vl); +} + +vuint64m4_t test_vror_vv_u64m4_m(vbool16_t vm, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { + return __riscv_vror(vm, vs2, vs1, vl); +} + +vuint64m4_t test_vror_vx_u64m4_m(vbool16_t vm, vuint64m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vror(vm, vs2, rs1, vl); +} + +vuint64m8_t test_vror_vv_u64m8_m(vbool8_t vm, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { + return __riscv_vror(vm, vs2, vs1, vl); +} + +vuint64m8_t test_vror_vx_u64m8_m(vbool8_t vm, vuint64m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vror(vm, vs2, rs1, vl); +} diff --git a/auto-generated/vector-crypto/overloaded-api-testing/vsha2ch.c b/auto-generated/vector-crypto/overloaded-api-testing/vsha2ch.c new file mode 100644 index 000000000..d04129849 --- /dev/null +++ b/auto-generated/vector-crypto/overloaded-api-testing/vsha2ch.c @@ -0,0 +1,38 
@@ +#include +#include + +vuint32mf2_t test_vsha2ch_vv_u32mf2(vuint32mf2_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { + return __riscv_vsha2ch(vd, vs2, vs1, vl); +} + +vuint32m1_t test_vsha2ch_vv_u32m1(vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vsha2ch(vd, vs2, vs1, vl); +} + +vuint32m2_t test_vsha2ch_vv_u32m2(vuint32m2_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { + return __riscv_vsha2ch(vd, vs2, vs1, vl); +} + +vuint32m4_t test_vsha2ch_vv_u32m4(vuint32m4_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { + return __riscv_vsha2ch(vd, vs2, vs1, vl); +} + +vuint32m8_t test_vsha2ch_vv_u32m8(vuint32m8_t vd, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) { + return __riscv_vsha2ch(vd, vs2, vs1, vl); +} + +vuint64m1_t test_vsha2ch_vv_u64m1(vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vsha2ch(vd, vs2, vs1, vl); +} + +vuint64m2_t test_vsha2ch_vv_u64m2(vuint64m2_t vd, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { + return __riscv_vsha2ch(vd, vs2, vs1, vl); +} + +vuint64m4_t test_vsha2ch_vv_u64m4(vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { + return __riscv_vsha2ch(vd, vs2, vs1, vl); +} + +vuint64m8_t test_vsha2ch_vv_u64m8(vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { + return __riscv_vsha2ch(vd, vs2, vs1, vl); +} diff --git a/auto-generated/vector-crypto/overloaded-api-testing/vsha2cl.c b/auto-generated/vector-crypto/overloaded-api-testing/vsha2cl.c new file mode 100644 index 000000000..4de7b49aa --- /dev/null +++ b/auto-generated/vector-crypto/overloaded-api-testing/vsha2cl.c @@ -0,0 +1,38 @@ +#include +#include + +vuint32mf2_t test_vsha2cl_vv_u32mf2(vuint32mf2_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { + return __riscv_vsha2cl(vd, vs2, vs1, vl); +} + +vuint32m1_t test_vsha2cl_vv_u32m1(vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vsha2cl(vd, vs2, vs1, vl); +} + +vuint32m2_t test_vsha2cl_vv_u32m2(vuint32m2_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { + return __riscv_vsha2cl(vd, vs2, vs1, vl); +} + +vuint32m4_t test_vsha2cl_vv_u32m4(vuint32m4_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { + return __riscv_vsha2cl(vd, vs2, vs1, vl); +} + +vuint32m8_t test_vsha2cl_vv_u32m8(vuint32m8_t vd, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) { + return __riscv_vsha2cl(vd, vs2, vs1, vl); +} + +vuint64m1_t test_vsha2cl_vv_u64m1(vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vsha2cl(vd, vs2, vs1, vl); +} + +vuint64m2_t test_vsha2cl_vv_u64m2(vuint64m2_t vd, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { + return __riscv_vsha2cl(vd, vs2, vs1, vl); +} + +vuint64m4_t test_vsha2cl_vv_u64m4(vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { + return __riscv_vsha2cl(vd, vs2, vs1, vl); +} + +vuint64m8_t test_vsha2cl_vv_u64m8(vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { + return __riscv_vsha2cl(vd, vs2, vs1, vl); +} diff --git a/auto-generated/vector-crypto/overloaded-api-testing/vsha2ms.c b/auto-generated/vector-crypto/overloaded-api-testing/vsha2ms.c new file mode 100644 index 000000000..70a696804 --- /dev/null +++ b/auto-generated/vector-crypto/overloaded-api-testing/vsha2ms.c @@ -0,0 +1,38 @@ +#include +#include + +vuint32mf2_t test_vsha2ms_vv_u32mf2(vuint32mf2_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { + return __riscv_vsha2ms(vd, vs2, vs1, vl); +} + +vuint32m1_t test_vsha2ms_vv_u32m1(vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { + return 
__riscv_vsha2ms(vd, vs2, vs1, vl); +} + +vuint32m2_t test_vsha2ms_vv_u32m2(vuint32m2_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { + return __riscv_vsha2ms(vd, vs2, vs1, vl); +} + +vuint32m4_t test_vsha2ms_vv_u32m4(vuint32m4_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { + return __riscv_vsha2ms(vd, vs2, vs1, vl); +} + +vuint32m8_t test_vsha2ms_vv_u32m8(vuint32m8_t vd, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) { + return __riscv_vsha2ms(vd, vs2, vs1, vl); +} + +vuint64m1_t test_vsha2ms_vv_u64m1(vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vsha2ms(vd, vs2, vs1, vl); +} + +vuint64m2_t test_vsha2ms_vv_u64m2(vuint64m2_t vd, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { + return __riscv_vsha2ms(vd, vs2, vs1, vl); +} + +vuint64m4_t test_vsha2ms_vv_u64m4(vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { + return __riscv_vsha2ms(vd, vs2, vs1, vl); +} + +vuint64m8_t test_vsha2ms_vv_u64m8(vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { + return __riscv_vsha2ms(vd, vs2, vs1, vl); +} diff --git a/auto-generated/vector-crypto/overloaded-api-testing/vsm3c.c b/auto-generated/vector-crypto/overloaded-api-testing/vsm3c.c new file mode 100644 index 000000000..728566e46 --- /dev/null +++ b/auto-generated/vector-crypto/overloaded-api-testing/vsm3c.c @@ -0,0 +1,22 @@ +#include +#include + +vuint32mf2_t test_vsm3c_vi_u32mf2(vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) { + return __riscv_vsm3c(vd, vs2, 0, vl); +} + +vuint32m1_t test_vsm3c_vi_u32m1(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vsm3c(vd, vs2, 0, vl); +} + +vuint32m2_t test_vsm3c_vi_u32m2(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vsm3c(vd, vs2, 0, vl); +} + +vuint32m4_t test_vsm3c_vi_u32m4(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vsm3c(vd, vs2, 0, vl); +} + +vuint32m8_t test_vsm3c_vi_u32m8(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { + return __riscv_vsm3c(vd, vs2, 0, vl); +} diff --git a/auto-generated/vector-crypto/overloaded-api-testing/vsm3me.c b/auto-generated/vector-crypto/overloaded-api-testing/vsm3me.c new file mode 100644 index 000000000..299159174 --- /dev/null +++ b/auto-generated/vector-crypto/overloaded-api-testing/vsm3me.c @@ -0,0 +1,22 @@ +#include +#include + +vuint32mf2_t test_vsm3me_vv_u32mf2(vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { + return __riscv_vsm3me(vs2, vs1, vl); +} + +vuint32m1_t test_vsm3me_vv_u32m1(vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vsm3me(vs2, vs1, vl); +} + +vuint32m2_t test_vsm3me_vv_u32m2(vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { + return __riscv_vsm3me(vs2, vs1, vl); +} + +vuint32m4_t test_vsm3me_vv_u32m4(vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { + return __riscv_vsm3me(vs2, vs1, vl); +} + +vuint32m8_t test_vsm3me_vv_u32m8(vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) { + return __riscv_vsm3me(vs2, vs1, vl); +} diff --git a/auto-generated/vector-crypto/overloaded-api-testing/vsm4k.c b/auto-generated/vector-crypto/overloaded-api-testing/vsm4k.c new file mode 100644 index 000000000..882694054 --- /dev/null +++ b/auto-generated/vector-crypto/overloaded-api-testing/vsm4k.c @@ -0,0 +1,22 @@ +#include +#include + +vuint32mf2_t test_vsm4k_vi_u32mf2(vuint32mf2_t vs2, size_t vl) { + return __riscv_vsm4k(vs2, 0, vl); +} + +vuint32m1_t test_vsm4k_vi_u32m1(vuint32m1_t vs2, size_t vl) { + return __riscv_vsm4k(vs2, 0, vl); +} + +vuint32m2_t test_vsm4k_vi_u32m2(vuint32m2_t vs2, size_t vl) { + return __riscv_vsm4k(vs2, 0, vl); +} + +vuint32m4_t 
test_vsm4k_vi_u32m4(vuint32m4_t vs2, size_t vl) { + return __riscv_vsm4k(vs2, 0, vl); +} + +vuint32m8_t test_vsm4k_vi_u32m8(vuint32m8_t vs2, size_t vl) { + return __riscv_vsm4k(vs2, 0, vl); +} diff --git a/auto-generated/vector-crypto/overloaded-api-testing/vsm4r.c b/auto-generated/vector-crypto/overloaded-api-testing/vsm4r.c new file mode 100644 index 000000000..cb106c8a5 --- /dev/null +++ b/auto-generated/vector-crypto/overloaded-api-testing/vsm4r.c @@ -0,0 +1,78 @@ +#include +#include + +vuint32mf2_t test_vsm4r_vv_u32mf2(vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) { + return __riscv_vsm4r_vv(vd, vs2, vl); +} + +vuint32mf2_t test_vsm4r_vs_u32mf2_u32mf2(vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) { + return __riscv_vsm4r_vs(vd, vs2, vl); +} + +vuint32m1_t test_vsm4r_vs_u32mf2_u32m1(vuint32m1_t vd, vuint32mf2_t vs2, size_t vl) { + return __riscv_vsm4r_vs(vd, vs2, vl); +} + +vuint32m2_t test_vsm4r_vs_u32mf2_u32m2(vuint32m2_t vd, vuint32mf2_t vs2, size_t vl) { + return __riscv_vsm4r_vs(vd, vs2, vl); +} + +vuint32m4_t test_vsm4r_vs_u32mf2_u32m4(vuint32m4_t vd, vuint32mf2_t vs2, size_t vl) { + return __riscv_vsm4r_vs(vd, vs2, vl); +} + +vuint32m8_t test_vsm4r_vs_u32mf2_u32m8(vuint32m8_t vd, vuint32mf2_t vs2, size_t vl) { + return __riscv_vsm4r_vs(vd, vs2, vl); +} + +vuint32m1_t test_vsm4r_vv_u32m1(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vsm4r_vv(vd, vs2, vl); +} + +vuint32m1_t test_vsm4r_vs_u32m1_u32m1(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vsm4r_vs(vd, vs2, vl); +} + +vuint32m2_t test_vsm4r_vs_u32m1_u32m2(vuint32m2_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vsm4r_vs(vd, vs2, vl); +} + +vuint32m4_t test_vsm4r_vs_u32m1_u32m4(vuint32m4_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vsm4r_vs(vd, vs2, vl); +} + +vuint32m8_t test_vsm4r_vs_u32m1_u32m8(vuint32m8_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vsm4r_vs(vd, vs2, vl); +} + +vuint32m2_t test_vsm4r_vv_u32m2(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vsm4r_vv(vd, vs2, vl); +} + +vuint32m2_t test_vsm4r_vs_u32m2_u32m2(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vsm4r_vs(vd, vs2, vl); +} + +vuint32m4_t test_vsm4r_vs_u32m2_u32m4(vuint32m4_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vsm4r_vs(vd, vs2, vl); +} + +vuint32m8_t test_vsm4r_vs_u32m2_u32m8(vuint32m8_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vsm4r_vs(vd, vs2, vl); +} + +vuint32m4_t test_vsm4r_vv_u32m4(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vsm4r_vv(vd, vs2, vl); +} + +vuint32m4_t test_vsm4r_vs_u32m4_u32m4(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vsm4r_vs(vd, vs2, vl); +} + +vuint32m8_t test_vsm4r_vs_u32m4_u32m8(vuint32m8_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vsm4r_vs(vd, vs2, vl); +} + +vuint32m8_t test_vsm4r_vv_u32m8(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { + return __riscv_vsm4r_vv(vd, vs2, vl); +} diff --git a/auto-generated/vector-crypto/overloaded-api-testing/vwsll.c b/auto-generated/vector-crypto/overloaded-api-testing/vwsll.c new file mode 100644 index 000000000..8696b7d1d --- /dev/null +++ b/auto-generated/vector-crypto/overloaded-api-testing/vwsll.c @@ -0,0 +1,242 @@ +#include +#include + +vuint16mf4_t test_vwsll_vv_u16mf4(vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { + return __riscv_vwsll(vs2, vs1, vl); +} + +vuint16mf4_t test_vwsll_vx_u16mf4(vuint8mf8_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll(vs2, rs1, vl); +} + +vuint16mf2_t test_vwsll_vv_u16mf2(vuint8mf4_t vs2, 
vuint8mf4_t vs1, size_t vl) { + return __riscv_vwsll(vs2, vs1, vl); +} + +vuint16mf2_t test_vwsll_vx_u16mf2(vuint8mf4_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll(vs2, rs1, vl); +} + +vuint16m1_t test_vwsll_vv_u16m1(vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { + return __riscv_vwsll(vs2, vs1, vl); +} + +vuint16m1_t test_vwsll_vx_u16m1(vuint8mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll(vs2, rs1, vl); +} + +vuint16m2_t test_vwsll_vv_u16m2(vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vwsll(vs2, vs1, vl); +} + +vuint16m2_t test_vwsll_vx_u16m2(vuint8m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll(vs2, rs1, vl); +} + +vuint16m4_t test_vwsll_vv_u16m4(vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) { + return __riscv_vwsll(vs2, vs1, vl); +} + +vuint16m4_t test_vwsll_vx_u16m4(vuint8m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll(vs2, rs1, vl); +} + +vuint16m8_t test_vwsll_vv_u16m8(vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) { + return __riscv_vwsll(vs2, vs1, vl); +} + +vuint16m8_t test_vwsll_vx_u16m8(vuint8m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll(vs2, rs1, vl); +} + +vuint32mf2_t test_vwsll_vv_u32mf2(vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { + return __riscv_vwsll(vs2, vs1, vl); +} + +vuint32mf2_t test_vwsll_vx_u32mf2(vuint16mf4_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll(vs2, rs1, vl); +} + +vuint32m1_t test_vwsll_vv_u32m1(vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { + return __riscv_vwsll(vs2, vs1, vl); +} + +vuint32m1_t test_vwsll_vx_u32m1(vuint16mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll(vs2, rs1, vl); +} + +vuint32m2_t test_vwsll_vv_u32m2(vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vwsll(vs2, vs1, vl); +} + +vuint32m2_t test_vwsll_vx_u32m2(vuint16m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll(vs2, rs1, vl); +} + +vuint32m4_t test_vwsll_vv_u32m4(vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { + return __riscv_vwsll(vs2, vs1, vl); +} + +vuint32m4_t test_vwsll_vx_u32m4(vuint16m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll(vs2, rs1, vl); +} + +vuint32m8_t test_vwsll_vv_u32m8(vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { + return __riscv_vwsll(vs2, vs1, vl); +} + +vuint32m8_t test_vwsll_vx_u32m8(vuint16m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll(vs2, rs1, vl); +} + +vuint64m1_t test_vwsll_vv_u64m1(vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { + return __riscv_vwsll(vs2, vs1, vl); +} + +vuint64m1_t test_vwsll_vx_u64m1(vuint32mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll(vs2, rs1, vl); +} + +vuint64m2_t test_vwsll_vv_u64m2(vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vwsll(vs2, vs1, vl); +} + +vuint64m2_t test_vwsll_vx_u64m2(vuint32m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll(vs2, rs1, vl); +} + +vuint64m4_t test_vwsll_vv_u64m4(vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { + return __riscv_vwsll(vs2, vs1, vl); +} + +vuint64m4_t test_vwsll_vx_u64m4(vuint32m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll(vs2, rs1, vl); +} + +vuint64m8_t test_vwsll_vv_u64m8(vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { + return __riscv_vwsll(vs2, vs1, vl); +} + +vuint64m8_t test_vwsll_vx_u64m8(vuint32m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll(vs2, rs1, vl); +} + +vuint16mf4_t test_vwsll_vv_u16mf4_m(vbool64_t vm, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { + return __riscv_vwsll(vm, vs2, vs1, vl); +} + +vuint16mf4_t test_vwsll_vx_u16mf4_m(vbool64_t vm, vuint8mf8_t vs2, size_t rs1, 
size_t vl) { + return __riscv_vwsll(vm, vs2, rs1, vl); +} + +vuint16mf2_t test_vwsll_vv_u16mf2_m(vbool32_t vm, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { + return __riscv_vwsll(vm, vs2, vs1, vl); +} + +vuint16mf2_t test_vwsll_vx_u16mf2_m(vbool32_t vm, vuint8mf4_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll(vm, vs2, rs1, vl); +} + +vuint16m1_t test_vwsll_vv_u16m1_m(vbool16_t vm, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { + return __riscv_vwsll(vm, vs2, vs1, vl); +} + +vuint16m1_t test_vwsll_vx_u16m1_m(vbool16_t vm, vuint8mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll(vm, vs2, rs1, vl); +} + +vuint16m2_t test_vwsll_vv_u16m2_m(vbool8_t vm, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vwsll(vm, vs2, vs1, vl); +} + +vuint16m2_t test_vwsll_vx_u16m2_m(vbool8_t vm, vuint8m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll(vm, vs2, rs1, vl); +} + +vuint16m4_t test_vwsll_vv_u16m4_m(vbool4_t vm, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) { + return __riscv_vwsll(vm, vs2, vs1, vl); +} + +vuint16m4_t test_vwsll_vx_u16m4_m(vbool4_t vm, vuint8m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll(vm, vs2, rs1, vl); +} + +vuint16m8_t test_vwsll_vv_u16m8_m(vbool2_t vm, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) { + return __riscv_vwsll(vm, vs2, vs1, vl); +} + +vuint16m8_t test_vwsll_vx_u16m8_m(vbool2_t vm, vuint8m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll(vm, vs2, rs1, vl); +} + +vuint32mf2_t test_vwsll_vv_u32mf2_m(vbool64_t vm, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { + return __riscv_vwsll(vm, vs2, vs1, vl); +} + +vuint32mf2_t test_vwsll_vx_u32mf2_m(vbool64_t vm, vuint16mf4_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll(vm, vs2, rs1, vl); +} + +vuint32m1_t test_vwsll_vv_u32m1_m(vbool32_t vm, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { + return __riscv_vwsll(vm, vs2, vs1, vl); +} + +vuint32m1_t test_vwsll_vx_u32m1_m(vbool32_t vm, vuint16mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll(vm, vs2, rs1, vl); +} + +vuint32m2_t test_vwsll_vv_u32m2_m(vbool16_t vm, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vwsll(vm, vs2, vs1, vl); +} + +vuint32m2_t test_vwsll_vx_u32m2_m(vbool16_t vm, vuint16m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll(vm, vs2, rs1, vl); +} + +vuint32m4_t test_vwsll_vv_u32m4_m(vbool8_t vm, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { + return __riscv_vwsll(vm, vs2, vs1, vl); +} + +vuint32m4_t test_vwsll_vx_u32m4_m(vbool8_t vm, vuint16m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll(vm, vs2, rs1, vl); +} + +vuint32m8_t test_vwsll_vv_u32m8_m(vbool4_t vm, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { + return __riscv_vwsll(vm, vs2, vs1, vl); +} + +vuint32m8_t test_vwsll_vx_u32m8_m(vbool4_t vm, vuint16m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll(vm, vs2, rs1, vl); +} + +vuint64m1_t test_vwsll_vv_u64m1_m(vbool64_t vm, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { + return __riscv_vwsll(vm, vs2, vs1, vl); +} + +vuint64m1_t test_vwsll_vx_u64m1_m(vbool64_t vm, vuint32mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll(vm, vs2, rs1, vl); +} + +vuint64m2_t test_vwsll_vv_u64m2_m(vbool32_t vm, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vwsll(vm, vs2, vs1, vl); +} + +vuint64m2_t test_vwsll_vx_u64m2_m(vbool32_t vm, vuint32m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll(vm, vs2, rs1, vl); +} + +vuint64m4_t test_vwsll_vv_u64m4_m(vbool16_t vm, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { + return __riscv_vwsll(vm, 
vs2, vs1, vl); +} + +vuint64m4_t test_vwsll_vx_u64m4_m(vbool16_t vm, vuint32m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll(vm, vs2, rs1, vl); +} + +vuint64m8_t test_vwsll_vv_u64m8_m(vbool8_t vm, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { + return __riscv_vwsll(vm, vs2, vs1, vl); +} + +vuint64m8_t test_vwsll_vx_u64m8_m(vbool8_t vm, vuint32m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll(vm, vs2, rs1, vl); +} diff --git a/auto-generated/vector-crypto/overloaded_intrinsic_funcs.md b/auto-generated/vector-crypto/overloaded_intrinsic_funcs.md new file mode 100644 index 000000000..fe4429338 --- /dev/null +++ b/auto-generated/vector-crypto/overloaded_intrinsic_funcs.md @@ -0,0 +1,938 @@ + +=== Zvbb - Vector Bit-manipulation used in Cryptography + +[[overloaded-]] +==== Vector Bit-manipulation used in Cryptography - Bitwise And-Not + +[,c] +---- +vuint8mf8_t __riscv_vandn (vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl); +vuint8mf8_t __riscv_vandn (vuint8mf8_t vs2, uint8_t rs1, size_t vl); +vuint8mf4_t __riscv_vandn (vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl); +vuint8mf4_t __riscv_vandn (vuint8mf4_t vs2, uint8_t rs1, size_t vl); +vuint8mf2_t __riscv_vandn (vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl); +vuint8mf2_t __riscv_vandn (vuint8mf2_t vs2, uint8_t rs1, size_t vl); +vuint8m1_t __riscv_vandn (vuint8m1_t vs2, vuint8m1_t vs1, size_t vl); +vuint8m1_t __riscv_vandn (vuint8m1_t vs2, uint8_t rs1, size_t vl); +vuint8m2_t __riscv_vandn (vuint8m2_t vs2, vuint8m2_t vs1, size_t vl); +vuint8m2_t __riscv_vandn (vuint8m2_t vs2, uint8_t rs1, size_t vl); +vuint8m4_t __riscv_vandn (vuint8m4_t vs2, vuint8m4_t vs1, size_t vl); +vuint8m4_t __riscv_vandn (vuint8m4_t vs2, uint8_t rs1, size_t vl); +vuint8m8_t __riscv_vandn (vuint8m8_t vs2, vuint8m8_t vs1, size_t vl); +vuint8m8_t __riscv_vandn (vuint8m8_t vs2, uint8_t rs1, size_t vl); +vuint16mf4_t __riscv_vandn (vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl); +vuint16mf4_t __riscv_vandn (vuint16mf4_t vs2, uint16_t rs1, size_t vl); +vuint16mf2_t __riscv_vandn (vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl); +vuint16mf2_t __riscv_vandn (vuint16mf2_t vs2, uint16_t rs1, size_t vl); +vuint16m1_t __riscv_vandn (vuint16m1_t vs2, vuint16m1_t vs1, size_t vl); +vuint16m1_t __riscv_vandn (vuint16m1_t vs2, uint16_t rs1, size_t vl); +vuint16m2_t __riscv_vandn (vuint16m2_t vs2, vuint16m2_t vs1, size_t vl); +vuint16m2_t __riscv_vandn (vuint16m2_t vs2, uint16_t rs1, size_t vl); +vuint16m4_t __riscv_vandn (vuint16m4_t vs2, vuint16m4_t vs1, size_t vl); +vuint16m4_t __riscv_vandn (vuint16m4_t vs2, uint16_t rs1, size_t vl); +vuint16m8_t __riscv_vandn (vuint16m8_t vs2, vuint16m8_t vs1, size_t vl); +vuint16m8_t __riscv_vandn (vuint16m8_t vs2, uint16_t rs1, size_t vl); +vuint32mf2_t __riscv_vandn (vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl); +vuint32mf2_t __riscv_vandn (vuint32mf2_t vs2, uint32_t rs1, size_t vl); +vuint32m1_t __riscv_vandn (vuint32m1_t vs2, vuint32m1_t vs1, size_t vl); +vuint32m1_t __riscv_vandn (vuint32m1_t vs2, uint32_t rs1, size_t vl); +vuint32m2_t __riscv_vandn (vuint32m2_t vs2, vuint32m2_t vs1, size_t vl); +vuint32m2_t __riscv_vandn (vuint32m2_t vs2, uint32_t rs1, size_t vl); +vuint32m4_t __riscv_vandn (vuint32m4_t vs2, vuint32m4_t vs1, size_t vl); +vuint32m4_t __riscv_vandn (vuint32m4_t vs2, uint32_t rs1, size_t vl); +vuint32m8_t __riscv_vandn (vuint32m8_t vs2, vuint32m8_t vs1, size_t vl); +vuint32m8_t __riscv_vandn (vuint32m8_t vs2, uint32_t rs1, size_t vl); +vuint64m1_t __riscv_vandn (vuint64m1_t vs2, vuint64m1_t vs1, size_t vl); 
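+// Illustrative use (hypothetical variable `vec`, not part of the generated listing):
+// for a vuint64m1_t `vec`, __riscv_vandn(vec, (uint64_t)0x7, vl) would compute
+// vec & ~0x7 in each element, since the scalar/vs1 operand is the one that is inverted.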
+vuint64m1_t __riscv_vandn (vuint64m1_t vs2, uint64_t rs1, size_t vl); +vuint64m2_t __riscv_vandn (vuint64m2_t vs2, vuint64m2_t vs1, size_t vl); +vuint64m2_t __riscv_vandn (vuint64m2_t vs2, uint64_t rs1, size_t vl); +vuint64m4_t __riscv_vandn (vuint64m4_t vs2, vuint64m4_t vs1, size_t vl); +vuint64m4_t __riscv_vandn (vuint64m4_t vs2, uint64_t rs1, size_t vl); +vuint64m8_t __riscv_vandn (vuint64m8_t vs2, vuint64m8_t vs1, size_t vl); +vuint64m8_t __riscv_vandn (vuint64m8_t vs2, uint64_t rs1, size_t vl); +// masked functions +vuint8mf8_t __riscv_vandn (vbool64_t vm, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl); +vuint8mf8_t __riscv_vandn (vbool64_t vm, vuint8mf8_t vs2, uint8_t rs1, size_t vl); +vuint8mf4_t __riscv_vandn (vbool32_t vm, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl); +vuint8mf4_t __riscv_vandn (vbool32_t vm, vuint8mf4_t vs2, uint8_t rs1, size_t vl); +vuint8mf2_t __riscv_vandn (vbool16_t vm, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl); +vuint8mf2_t __riscv_vandn (vbool16_t vm, vuint8mf2_t vs2, uint8_t rs1, size_t vl); +vuint8m1_t __riscv_vandn (vbool8_t vm, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl); +vuint8m1_t __riscv_vandn (vbool8_t vm, vuint8m1_t vs2, uint8_t rs1, size_t vl); +vuint8m2_t __riscv_vandn (vbool4_t vm, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl); +vuint8m2_t __riscv_vandn (vbool4_t vm, vuint8m2_t vs2, uint8_t rs1, size_t vl); +vuint8m4_t __riscv_vandn (vbool2_t vm, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl); +vuint8m4_t __riscv_vandn (vbool2_t vm, vuint8m4_t vs2, uint8_t rs1, size_t vl); +vuint8m8_t __riscv_vandn (vbool1_t vm, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl); +vuint8m8_t __riscv_vandn (vbool1_t vm, vuint8m8_t vs2, uint8_t rs1, size_t vl); +vuint16mf4_t __riscv_vandn (vbool64_t vm, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl); +vuint16mf4_t __riscv_vandn (vbool64_t vm, vuint16mf4_t vs2, uint16_t rs1, size_t vl); +vuint16mf2_t __riscv_vandn (vbool32_t vm, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl); +vuint16mf2_t __riscv_vandn (vbool32_t vm, vuint16mf2_t vs2, uint16_t rs1, size_t vl); +vuint16m1_t __riscv_vandn (vbool16_t vm, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl); +vuint16m1_t __riscv_vandn (vbool16_t vm, vuint16m1_t vs2, uint16_t rs1, size_t vl); +vuint16m2_t __riscv_vandn (vbool8_t vm, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl); +vuint16m2_t __riscv_vandn (vbool8_t vm, vuint16m2_t vs2, uint16_t rs1, size_t vl); +vuint16m4_t __riscv_vandn (vbool4_t vm, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl); +vuint16m4_t __riscv_vandn (vbool4_t vm, vuint16m4_t vs2, uint16_t rs1, size_t vl); +vuint16m8_t __riscv_vandn (vbool2_t vm, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl); +vuint16m8_t __riscv_vandn (vbool2_t vm, vuint16m8_t vs2, uint16_t rs1, size_t vl); +vuint32mf2_t __riscv_vandn (vbool64_t vm, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl); +vuint32mf2_t __riscv_vandn (vbool64_t vm, vuint32mf2_t vs2, uint32_t rs1, size_t vl); +vuint32m1_t __riscv_vandn (vbool32_t vm, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl); +vuint32m1_t __riscv_vandn (vbool32_t vm, vuint32m1_t vs2, uint32_t rs1, size_t vl); +vuint32m2_t __riscv_vandn (vbool16_t vm, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl); +vuint32m2_t __riscv_vandn (vbool16_t vm, vuint32m2_t vs2, uint32_t rs1, size_t vl); +vuint32m4_t __riscv_vandn (vbool8_t vm, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl); +vuint32m4_t __riscv_vandn (vbool8_t vm, vuint32m4_t vs2, uint32_t rs1, size_t vl); +vuint32m8_t __riscv_vandn (vbool4_t vm, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl); +vuint32m8_t __riscv_vandn 
(vbool4_t vm, vuint32m8_t vs2, uint32_t rs1, size_t vl); +vuint64m1_t __riscv_vandn (vbool64_t vm, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl); +vuint64m1_t __riscv_vandn (vbool64_t vm, vuint64m1_t vs2, uint64_t rs1, size_t vl); +vuint64m2_t __riscv_vandn (vbool32_t vm, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl); +vuint64m2_t __riscv_vandn (vbool32_t vm, vuint64m2_t vs2, uint64_t rs1, size_t vl); +vuint64m4_t __riscv_vandn (vbool16_t vm, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl); +vuint64m4_t __riscv_vandn (vbool16_t vm, vuint64m4_t vs2, uint64_t rs1, size_t vl); +vuint64m8_t __riscv_vandn (vbool8_t vm, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl); +vuint64m8_t __riscv_vandn (vbool8_t vm, vuint64m8_t vs2, uint64_t rs1, size_t vl); +---- + +[[overloaded-]] +==== Vector Basic Bit-manipulation - Reverse + +[,c] +---- +vuint8mf8_t __riscv_vbrev (vuint8mf8_t vs2, size_t vl); +vuint8mf4_t __riscv_vbrev (vuint8mf4_t vs2, size_t vl); +vuint8mf2_t __riscv_vbrev (vuint8mf2_t vs2, size_t vl); +vuint8m1_t __riscv_vbrev (vuint8m1_t vs2, size_t vl); +vuint8m2_t __riscv_vbrev (vuint8m2_t vs2, size_t vl); +vuint8m4_t __riscv_vbrev (vuint8m4_t vs2, size_t vl); +vuint8m8_t __riscv_vbrev (vuint8m8_t vs2, size_t vl); +vuint16mf4_t __riscv_vbrev (vuint16mf4_t vs2, size_t vl); +vuint16mf2_t __riscv_vbrev (vuint16mf2_t vs2, size_t vl); +vuint16m1_t __riscv_vbrev (vuint16m1_t vs2, size_t vl); +vuint16m2_t __riscv_vbrev (vuint16m2_t vs2, size_t vl); +vuint16m4_t __riscv_vbrev (vuint16m4_t vs2, size_t vl); +vuint16m8_t __riscv_vbrev (vuint16m8_t vs2, size_t vl); +vuint32mf2_t __riscv_vbrev (vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vbrev (vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vbrev (vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vbrev (vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vbrev (vuint32m8_t vs2, size_t vl); +vuint64m1_t __riscv_vbrev (vuint64m1_t vs2, size_t vl); +vuint64m2_t __riscv_vbrev (vuint64m2_t vs2, size_t vl); +vuint64m4_t __riscv_vbrev (vuint64m4_t vs2, size_t vl); +vuint64m8_t __riscv_vbrev (vuint64m8_t vs2, size_t vl); +vuint8mf8_t __riscv_vbrev8 (vuint8mf8_t vs2, size_t vl); +vuint8mf4_t __riscv_vbrev8 (vuint8mf4_t vs2, size_t vl); +vuint8mf2_t __riscv_vbrev8 (vuint8mf2_t vs2, size_t vl); +vuint8m1_t __riscv_vbrev8 (vuint8m1_t vs2, size_t vl); +vuint8m2_t __riscv_vbrev8 (vuint8m2_t vs2, size_t vl); +vuint8m4_t __riscv_vbrev8 (vuint8m4_t vs2, size_t vl); +vuint8m8_t __riscv_vbrev8 (vuint8m8_t vs2, size_t vl); +vuint16mf4_t __riscv_vbrev8 (vuint16mf4_t vs2, size_t vl); +vuint16mf2_t __riscv_vbrev8 (vuint16mf2_t vs2, size_t vl); +vuint16m1_t __riscv_vbrev8 (vuint16m1_t vs2, size_t vl); +vuint16m2_t __riscv_vbrev8 (vuint16m2_t vs2, size_t vl); +vuint16m4_t __riscv_vbrev8 (vuint16m4_t vs2, size_t vl); +vuint16m8_t __riscv_vbrev8 (vuint16m8_t vs2, size_t vl); +vuint32mf2_t __riscv_vbrev8 (vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vbrev8 (vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vbrev8 (vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vbrev8 (vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vbrev8 (vuint32m8_t vs2, size_t vl); +vuint64m1_t __riscv_vbrev8 (vuint64m1_t vs2, size_t vl); +vuint64m2_t __riscv_vbrev8 (vuint64m2_t vs2, size_t vl); +vuint64m4_t __riscv_vbrev8 (vuint64m4_t vs2, size_t vl); +vuint64m8_t __riscv_vbrev8 (vuint64m8_t vs2, size_t vl); +vuint8mf8_t __riscv_vrev8 (vuint8mf8_t vs2, size_t vl); +vuint8mf4_t __riscv_vrev8 (vuint8mf4_t vs2, size_t vl); +vuint8mf2_t __riscv_vrev8 (vuint8mf2_t vs2, size_t vl); +vuint8m1_t __riscv_vrev8 
(vuint8m1_t vs2, size_t vl); +vuint8m2_t __riscv_vrev8 (vuint8m2_t vs2, size_t vl); +vuint8m4_t __riscv_vrev8 (vuint8m4_t vs2, size_t vl); +vuint8m8_t __riscv_vrev8 (vuint8m8_t vs2, size_t vl); +vuint16mf4_t __riscv_vrev8 (vuint16mf4_t vs2, size_t vl); +vuint16mf2_t __riscv_vrev8 (vuint16mf2_t vs2, size_t vl); +vuint16m1_t __riscv_vrev8 (vuint16m1_t vs2, size_t vl); +vuint16m2_t __riscv_vrev8 (vuint16m2_t vs2, size_t vl); +vuint16m4_t __riscv_vrev8 (vuint16m4_t vs2, size_t vl); +vuint16m8_t __riscv_vrev8 (vuint16m8_t vs2, size_t vl); +vuint32mf2_t __riscv_vrev8 (vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vrev8 (vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vrev8 (vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vrev8 (vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vrev8 (vuint32m8_t vs2, size_t vl); +vuint64m1_t __riscv_vrev8 (vuint64m1_t vs2, size_t vl); +vuint64m2_t __riscv_vrev8 (vuint64m2_t vs2, size_t vl); +vuint64m4_t __riscv_vrev8 (vuint64m4_t vs2, size_t vl); +vuint64m8_t __riscv_vrev8 (vuint64m8_t vs2, size_t vl); +// masked functions +vuint8mf8_t __riscv_vbrev (vbool64_t vm, vuint8mf8_t vs2, size_t vl); +vuint8mf4_t __riscv_vbrev (vbool32_t vm, vuint8mf4_t vs2, size_t vl); +vuint8mf2_t __riscv_vbrev (vbool16_t vm, vuint8mf2_t vs2, size_t vl); +vuint8m1_t __riscv_vbrev (vbool8_t vm, vuint8m1_t vs2, size_t vl); +vuint8m2_t __riscv_vbrev (vbool4_t vm, vuint8m2_t vs2, size_t vl); +vuint8m4_t __riscv_vbrev (vbool2_t vm, vuint8m4_t vs2, size_t vl); +vuint8m8_t __riscv_vbrev (vbool1_t vm, vuint8m8_t vs2, size_t vl); +vuint16mf4_t __riscv_vbrev (vbool64_t vm, vuint16mf4_t vs2, size_t vl); +vuint16mf2_t __riscv_vbrev (vbool32_t vm, vuint16mf2_t vs2, size_t vl); +vuint16m1_t __riscv_vbrev (vbool16_t vm, vuint16m1_t vs2, size_t vl); +vuint16m2_t __riscv_vbrev (vbool8_t vm, vuint16m2_t vs2, size_t vl); +vuint16m4_t __riscv_vbrev (vbool4_t vm, vuint16m4_t vs2, size_t vl); +vuint16m8_t __riscv_vbrev (vbool2_t vm, vuint16m8_t vs2, size_t vl); +vuint32mf2_t __riscv_vbrev (vbool64_t vm, vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vbrev (vbool32_t vm, vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vbrev (vbool16_t vm, vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vbrev (vbool8_t vm, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vbrev (vbool4_t vm, vuint32m8_t vs2, size_t vl); +vuint64m1_t __riscv_vbrev (vbool64_t vm, vuint64m1_t vs2, size_t vl); +vuint64m2_t __riscv_vbrev (vbool32_t vm, vuint64m2_t vs2, size_t vl); +vuint64m4_t __riscv_vbrev (vbool16_t vm, vuint64m4_t vs2, size_t vl); +vuint64m8_t __riscv_vbrev (vbool8_t vm, vuint64m8_t vs2, size_t vl); +vuint8mf8_t __riscv_vbrev8 (vbool64_t vm, vuint8mf8_t vs2, size_t vl); +vuint8mf4_t __riscv_vbrev8 (vbool32_t vm, vuint8mf4_t vs2, size_t vl); +vuint8mf2_t __riscv_vbrev8 (vbool16_t vm, vuint8mf2_t vs2, size_t vl); +vuint8m1_t __riscv_vbrev8 (vbool8_t vm, vuint8m1_t vs2, size_t vl); +vuint8m2_t __riscv_vbrev8 (vbool4_t vm, vuint8m2_t vs2, size_t vl); +vuint8m4_t __riscv_vbrev8 (vbool2_t vm, vuint8m4_t vs2, size_t vl); +vuint8m8_t __riscv_vbrev8 (vbool1_t vm, vuint8m8_t vs2, size_t vl); +vuint16mf4_t __riscv_vbrev8 (vbool64_t vm, vuint16mf4_t vs2, size_t vl); +vuint16mf2_t __riscv_vbrev8 (vbool32_t vm, vuint16mf2_t vs2, size_t vl); +vuint16m1_t __riscv_vbrev8 (vbool16_t vm, vuint16m1_t vs2, size_t vl); +vuint16m2_t __riscv_vbrev8 (vbool8_t vm, vuint16m2_t vs2, size_t vl); +vuint16m4_t __riscv_vbrev8 (vbool4_t vm, vuint16m4_t vs2, size_t vl); +vuint16m8_t __riscv_vbrev8 (vbool2_t vm, vuint16m8_t vs2, size_t vl); 
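+// For reference: vbrev reverses the bit order of each element, vbrev8 reverses
+// the bit order within each byte, and vrev8 reverses the byte order of each
+// element (an element-wise byte swap).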
+vuint32mf2_t __riscv_vbrev8 (vbool64_t vm, vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vbrev8 (vbool32_t vm, vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vbrev8 (vbool16_t vm, vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vbrev8 (vbool8_t vm, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vbrev8 (vbool4_t vm, vuint32m8_t vs2, size_t vl); +vuint64m1_t __riscv_vbrev8 (vbool64_t vm, vuint64m1_t vs2, size_t vl); +vuint64m2_t __riscv_vbrev8 (vbool32_t vm, vuint64m2_t vs2, size_t vl); +vuint64m4_t __riscv_vbrev8 (vbool16_t vm, vuint64m4_t vs2, size_t vl); +vuint64m8_t __riscv_vbrev8 (vbool8_t vm, vuint64m8_t vs2, size_t vl); +vuint8mf8_t __riscv_vrev8 (vbool64_t vm, vuint8mf8_t vs2, size_t vl); +vuint8mf4_t __riscv_vrev8 (vbool32_t vm, vuint8mf4_t vs2, size_t vl); +vuint8mf2_t __riscv_vrev8 (vbool16_t vm, vuint8mf2_t vs2, size_t vl); +vuint8m1_t __riscv_vrev8 (vbool8_t vm, vuint8m1_t vs2, size_t vl); +vuint8m2_t __riscv_vrev8 (vbool4_t vm, vuint8m2_t vs2, size_t vl); +vuint8m4_t __riscv_vrev8 (vbool2_t vm, vuint8m4_t vs2, size_t vl); +vuint8m8_t __riscv_vrev8 (vbool1_t vm, vuint8m8_t vs2, size_t vl); +vuint16mf4_t __riscv_vrev8 (vbool64_t vm, vuint16mf4_t vs2, size_t vl); +vuint16mf2_t __riscv_vrev8 (vbool32_t vm, vuint16mf2_t vs2, size_t vl); +vuint16m1_t __riscv_vrev8 (vbool16_t vm, vuint16m1_t vs2, size_t vl); +vuint16m2_t __riscv_vrev8 (vbool8_t vm, vuint16m2_t vs2, size_t vl); +vuint16m4_t __riscv_vrev8 (vbool4_t vm, vuint16m4_t vs2, size_t vl); +vuint16m8_t __riscv_vrev8 (vbool2_t vm, vuint16m8_t vs2, size_t vl); +vuint32mf2_t __riscv_vrev8 (vbool64_t vm, vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vrev8 (vbool32_t vm, vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vrev8 (vbool16_t vm, vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vrev8 (vbool8_t vm, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vrev8 (vbool4_t vm, vuint32m8_t vs2, size_t vl); +vuint64m1_t __riscv_vrev8 (vbool64_t vm, vuint64m1_t vs2, size_t vl); +vuint64m2_t __riscv_vrev8 (vbool32_t vm, vuint64m2_t vs2, size_t vl); +vuint64m4_t __riscv_vrev8 (vbool16_t vm, vuint64m4_t vs2, size_t vl); +vuint64m8_t __riscv_vrev8 (vbool8_t vm, vuint64m8_t vs2, size_t vl); +---- + +[[overloaded-]] +==== Vector Basic Bit-manipulation - Count Bits + +[,c] +---- +vuint8mf8_t __riscv_vclz (vuint8mf8_t vs2, size_t vl); +vuint8mf4_t __riscv_vclz (vuint8mf4_t vs2, size_t vl); +vuint8mf2_t __riscv_vclz (vuint8mf2_t vs2, size_t vl); +vuint8m1_t __riscv_vclz (vuint8m1_t vs2, size_t vl); +vuint8m2_t __riscv_vclz (vuint8m2_t vs2, size_t vl); +vuint8m4_t __riscv_vclz (vuint8m4_t vs2, size_t vl); +vuint8m8_t __riscv_vclz (vuint8m8_t vs2, size_t vl); +vuint16mf4_t __riscv_vclz (vuint16mf4_t vs2, size_t vl); +vuint16mf2_t __riscv_vclz (vuint16mf2_t vs2, size_t vl); +vuint16m1_t __riscv_vclz (vuint16m1_t vs2, size_t vl); +vuint16m2_t __riscv_vclz (vuint16m2_t vs2, size_t vl); +vuint16m4_t __riscv_vclz (vuint16m4_t vs2, size_t vl); +vuint16m8_t __riscv_vclz (vuint16m8_t vs2, size_t vl); +vuint32mf2_t __riscv_vclz (vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vclz (vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vclz (vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vclz (vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vclz (vuint32m8_t vs2, size_t vl); +vuint64m1_t __riscv_vclz (vuint64m1_t vs2, size_t vl); +vuint64m2_t __riscv_vclz (vuint64m2_t vs2, size_t vl); +vuint64m4_t __riscv_vclz (vuint64m4_t vs2, size_t vl); +vuint64m8_t __riscv_vclz (vuint64m8_t vs2, size_t vl); +vuint8mf8_t __riscv_vctz (vuint8mf8_t 
vs2, size_t vl); +vuint8mf4_t __riscv_vctz (vuint8mf4_t vs2, size_t vl); +vuint8mf2_t __riscv_vctz (vuint8mf2_t vs2, size_t vl); +vuint8m1_t __riscv_vctz (vuint8m1_t vs2, size_t vl); +vuint8m2_t __riscv_vctz (vuint8m2_t vs2, size_t vl); +vuint8m4_t __riscv_vctz (vuint8m4_t vs2, size_t vl); +vuint8m8_t __riscv_vctz (vuint8m8_t vs2, size_t vl); +vuint16mf4_t __riscv_vctz (vuint16mf4_t vs2, size_t vl); +vuint16mf2_t __riscv_vctz (vuint16mf2_t vs2, size_t vl); +vuint16m1_t __riscv_vctz (vuint16m1_t vs2, size_t vl); +vuint16m2_t __riscv_vctz (vuint16m2_t vs2, size_t vl); +vuint16m4_t __riscv_vctz (vuint16m4_t vs2, size_t vl); +vuint16m8_t __riscv_vctz (vuint16m8_t vs2, size_t vl); +vuint32mf2_t __riscv_vctz (vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vctz (vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vctz (vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vctz (vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vctz (vuint32m8_t vs2, size_t vl); +vuint64m1_t __riscv_vctz (vuint64m1_t vs2, size_t vl); +vuint64m2_t __riscv_vctz (vuint64m2_t vs2, size_t vl); +vuint64m4_t __riscv_vctz (vuint64m4_t vs2, size_t vl); +vuint64m8_t __riscv_vctz (vuint64m8_t vs2, size_t vl); +// masked functions +vuint8mf8_t __riscv_vclz (vbool64_t vm, vuint8mf8_t vs2, size_t vl); +vuint8mf4_t __riscv_vclz (vbool32_t vm, vuint8mf4_t vs2, size_t vl); +vuint8mf2_t __riscv_vclz (vbool16_t vm, vuint8mf2_t vs2, size_t vl); +vuint8m1_t __riscv_vclz (vbool8_t vm, vuint8m1_t vs2, size_t vl); +vuint8m2_t __riscv_vclz (vbool4_t vm, vuint8m2_t vs2, size_t vl); +vuint8m4_t __riscv_vclz (vbool2_t vm, vuint8m4_t vs2, size_t vl); +vuint8m8_t __riscv_vclz (vbool1_t vm, vuint8m8_t vs2, size_t vl); +vuint16mf4_t __riscv_vclz (vbool64_t vm, vuint16mf4_t vs2, size_t vl); +vuint16mf2_t __riscv_vclz (vbool32_t vm, vuint16mf2_t vs2, size_t vl); +vuint16m1_t __riscv_vclz (vbool16_t vm, vuint16m1_t vs2, size_t vl); +vuint16m2_t __riscv_vclz (vbool8_t vm, vuint16m2_t vs2, size_t vl); +vuint16m4_t __riscv_vclz (vbool4_t vm, vuint16m4_t vs2, size_t vl); +vuint16m8_t __riscv_vclz (vbool2_t vm, vuint16m8_t vs2, size_t vl); +vuint32mf2_t __riscv_vclz (vbool64_t vm, vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vclz (vbool32_t vm, vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vclz (vbool16_t vm, vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vclz (vbool8_t vm, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vclz (vbool4_t vm, vuint32m8_t vs2, size_t vl); +vuint64m1_t __riscv_vclz (vbool64_t vm, vuint64m1_t vs2, size_t vl); +vuint64m2_t __riscv_vclz (vbool32_t vm, vuint64m2_t vs2, size_t vl); +vuint64m4_t __riscv_vclz (vbool16_t vm, vuint64m4_t vs2, size_t vl); +vuint64m8_t __riscv_vclz (vbool8_t vm, vuint64m8_t vs2, size_t vl); +vuint8mf8_t __riscv_vctz (vbool64_t vm, vuint8mf8_t vs2, size_t vl); +vuint8mf4_t __riscv_vctz (vbool32_t vm, vuint8mf4_t vs2, size_t vl); +vuint8mf2_t __riscv_vctz (vbool16_t vm, vuint8mf2_t vs2, size_t vl); +vuint8m1_t __riscv_vctz (vbool8_t vm, vuint8m1_t vs2, size_t vl); +vuint8m2_t __riscv_vctz (vbool4_t vm, vuint8m2_t vs2, size_t vl); +vuint8m4_t __riscv_vctz (vbool2_t vm, vuint8m4_t vs2, size_t vl); +vuint8m8_t __riscv_vctz (vbool1_t vm, vuint8m8_t vs2, size_t vl); +vuint16mf4_t __riscv_vctz (vbool64_t vm, vuint16mf4_t vs2, size_t vl); +vuint16mf2_t __riscv_vctz (vbool32_t vm, vuint16mf2_t vs2, size_t vl); +vuint16m1_t __riscv_vctz (vbool16_t vm, vuint16m1_t vs2, size_t vl); +vuint16m2_t __riscv_vctz (vbool8_t vm, vuint16m2_t vs2, size_t vl); +vuint16m4_t __riscv_vctz (vbool4_t vm, vuint16m4_t 
vs2, size_t vl); +vuint16m8_t __riscv_vctz (vbool2_t vm, vuint16m8_t vs2, size_t vl); +vuint32mf2_t __riscv_vctz (vbool64_t vm, vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vctz (vbool32_t vm, vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vctz (vbool16_t vm, vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vctz (vbool8_t vm, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vctz (vbool4_t vm, vuint32m8_t vs2, size_t vl); +vuint64m1_t __riscv_vctz (vbool64_t vm, vuint64m1_t vs2, size_t vl); +vuint64m2_t __riscv_vctz (vbool32_t vm, vuint64m2_t vs2, size_t vl); +vuint64m4_t __riscv_vctz (vbool16_t vm, vuint64m4_t vs2, size_t vl); +vuint64m8_t __riscv_vctz (vbool8_t vm, vuint64m8_t vs2, size_t vl); +---- + +[[overloaded-]] +==== Vector Basic Bit-manipulation - Vector Population Count + +[,c] +---- +vuint8mf8_t __riscv_vcpop (vuint8mf8_t vs2, size_t vl); +vuint8mf4_t __riscv_vcpop (vuint8mf4_t vs2, size_t vl); +vuint8mf2_t __riscv_vcpop (vuint8mf2_t vs2, size_t vl); +vuint8m1_t __riscv_vcpop (vuint8m1_t vs2, size_t vl); +vuint8m2_t __riscv_vcpop (vuint8m2_t vs2, size_t vl); +vuint8m4_t __riscv_vcpop (vuint8m4_t vs2, size_t vl); +vuint8m8_t __riscv_vcpop (vuint8m8_t vs2, size_t vl); +vuint16mf4_t __riscv_vcpop (vuint16mf4_t vs2, size_t vl); +vuint16mf2_t __riscv_vcpop (vuint16mf2_t vs2, size_t vl); +vuint16m1_t __riscv_vcpop (vuint16m1_t vs2, size_t vl); +vuint16m2_t __riscv_vcpop (vuint16m2_t vs2, size_t vl); +vuint16m4_t __riscv_vcpop (vuint16m4_t vs2, size_t vl); +vuint16m8_t __riscv_vcpop (vuint16m8_t vs2, size_t vl); +vuint32mf2_t __riscv_vcpop (vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vcpop (vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vcpop (vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vcpop (vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vcpop (vuint32m8_t vs2, size_t vl); +vuint64m1_t __riscv_vcpop (vuint64m1_t vs2, size_t vl); +vuint64m2_t __riscv_vcpop (vuint64m2_t vs2, size_t vl); +vuint64m4_t __riscv_vcpop (vuint64m4_t vs2, size_t vl); +vuint64m8_t __riscv_vcpop (vuint64m8_t vs2, size_t vl); +// masked functions +vuint8mf8_t __riscv_vcpop (vbool64_t vm, vuint8mf8_t vs2, size_t vl); +vuint8mf4_t __riscv_vcpop (vbool32_t vm, vuint8mf4_t vs2, size_t vl); +vuint8mf2_t __riscv_vcpop (vbool16_t vm, vuint8mf2_t vs2, size_t vl); +vuint8m1_t __riscv_vcpop (vbool8_t vm, vuint8m1_t vs2, size_t vl); +vuint8m2_t __riscv_vcpop (vbool4_t vm, vuint8m2_t vs2, size_t vl); +vuint8m4_t __riscv_vcpop (vbool2_t vm, vuint8m4_t vs2, size_t vl); +vuint8m8_t __riscv_vcpop (vbool1_t vm, vuint8m8_t vs2, size_t vl); +vuint16mf4_t __riscv_vcpop (vbool64_t vm, vuint16mf4_t vs2, size_t vl); +vuint16mf2_t __riscv_vcpop (vbool32_t vm, vuint16mf2_t vs2, size_t vl); +vuint16m1_t __riscv_vcpop (vbool16_t vm, vuint16m1_t vs2, size_t vl); +vuint16m2_t __riscv_vcpop (vbool8_t vm, vuint16m2_t vs2, size_t vl); +vuint16m4_t __riscv_vcpop (vbool4_t vm, vuint16m4_t vs2, size_t vl); +vuint16m8_t __riscv_vcpop (vbool2_t vm, vuint16m8_t vs2, size_t vl); +vuint32mf2_t __riscv_vcpop (vbool64_t vm, vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vcpop (vbool32_t vm, vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vcpop (vbool16_t vm, vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vcpop (vbool8_t vm, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vcpop (vbool4_t vm, vuint32m8_t vs2, size_t vl); +vuint64m1_t __riscv_vcpop (vbool64_t vm, vuint64m1_t vs2, size_t vl); +vuint64m2_t __riscv_vcpop (vbool32_t vm, vuint64m2_t vs2, size_t vl); +vuint64m4_t __riscv_vcpop (vbool16_t vm, vuint64m4_t 
vs2, size_t vl); +vuint64m8_t __riscv_vcpop (vbool8_t vm, vuint64m8_t vs2, size_t vl); +---- + +[[overloaded-]] +==== Vector Bit-manipulation used in Cryptography - Rotate + +[,c] +---- +vuint8mf8_t __riscv_vrol (vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl); +vuint8mf8_t __riscv_vrol (vuint8mf8_t vs2, size_t rs1, size_t vl); +vuint8mf4_t __riscv_vrol (vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl); +vuint8mf4_t __riscv_vrol (vuint8mf4_t vs2, size_t rs1, size_t vl); +vuint8mf2_t __riscv_vrol (vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl); +vuint8mf2_t __riscv_vrol (vuint8mf2_t vs2, size_t rs1, size_t vl); +vuint8m1_t __riscv_vrol (vuint8m1_t vs2, vuint8m1_t vs1, size_t vl); +vuint8m1_t __riscv_vrol (vuint8m1_t vs2, size_t rs1, size_t vl); +vuint8m2_t __riscv_vrol (vuint8m2_t vs2, vuint8m2_t vs1, size_t vl); +vuint8m2_t __riscv_vrol (vuint8m2_t vs2, size_t rs1, size_t vl); +vuint8m4_t __riscv_vrol (vuint8m4_t vs2, vuint8m4_t vs1, size_t vl); +vuint8m4_t __riscv_vrol (vuint8m4_t vs2, size_t rs1, size_t vl); +vuint8m8_t __riscv_vrol (vuint8m8_t vs2, vuint8m8_t vs1, size_t vl); +vuint8m8_t __riscv_vrol (vuint8m8_t vs2, size_t rs1, size_t vl); +vuint16mf4_t __riscv_vrol (vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl); +vuint16mf4_t __riscv_vrol (vuint16mf4_t vs2, size_t rs1, size_t vl); +vuint16mf2_t __riscv_vrol (vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl); +vuint16mf2_t __riscv_vrol (vuint16mf2_t vs2, size_t rs1, size_t vl); +vuint16m1_t __riscv_vrol (vuint16m1_t vs2, vuint16m1_t vs1, size_t vl); +vuint16m1_t __riscv_vrol (vuint16m1_t vs2, size_t rs1, size_t vl); +vuint16m2_t __riscv_vrol (vuint16m2_t vs2, vuint16m2_t vs1, size_t vl); +vuint16m2_t __riscv_vrol (vuint16m2_t vs2, size_t rs1, size_t vl); +vuint16m4_t __riscv_vrol (vuint16m4_t vs2, vuint16m4_t vs1, size_t vl); +vuint16m4_t __riscv_vrol (vuint16m4_t vs2, size_t rs1, size_t vl); +vuint16m8_t __riscv_vrol (vuint16m8_t vs2, vuint16m8_t vs1, size_t vl); +vuint16m8_t __riscv_vrol (vuint16m8_t vs2, size_t rs1, size_t vl); +vuint32mf2_t __riscv_vrol (vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl); +vuint32mf2_t __riscv_vrol (vuint32mf2_t vs2, size_t rs1, size_t vl); +vuint32m1_t __riscv_vrol (vuint32m1_t vs2, vuint32m1_t vs1, size_t vl); +vuint32m1_t __riscv_vrol (vuint32m1_t vs2, size_t rs1, size_t vl); +vuint32m2_t __riscv_vrol (vuint32m2_t vs2, vuint32m2_t vs1, size_t vl); +vuint32m2_t __riscv_vrol (vuint32m2_t vs2, size_t rs1, size_t vl); +vuint32m4_t __riscv_vrol (vuint32m4_t vs2, vuint32m4_t vs1, size_t vl); +vuint32m4_t __riscv_vrol (vuint32m4_t vs2, size_t rs1, size_t vl); +vuint32m8_t __riscv_vrol (vuint32m8_t vs2, vuint32m8_t vs1, size_t vl); +vuint32m8_t __riscv_vrol (vuint32m8_t vs2, size_t rs1, size_t vl); +vuint64m1_t __riscv_vrol (vuint64m1_t vs2, vuint64m1_t vs1, size_t vl); +vuint64m1_t __riscv_vrol (vuint64m1_t vs2, size_t rs1, size_t vl); +vuint64m2_t __riscv_vrol (vuint64m2_t vs2, vuint64m2_t vs1, size_t vl); +vuint64m2_t __riscv_vrol (vuint64m2_t vs2, size_t rs1, size_t vl); +vuint64m4_t __riscv_vrol (vuint64m4_t vs2, vuint64m4_t vs1, size_t vl); +vuint64m4_t __riscv_vrol (vuint64m4_t vs2, size_t rs1, size_t vl); +vuint64m8_t __riscv_vrol (vuint64m8_t vs2, vuint64m8_t vs1, size_t vl); +vuint64m8_t __riscv_vrol (vuint64m8_t vs2, size_t rs1, size_t vl); +vuint8mf8_t __riscv_vror (vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl); +vuint8mf8_t __riscv_vror (vuint8mf8_t vs2, size_t rs1, size_t vl); +vuint8mf4_t __riscv_vror (vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl); +vuint8mf4_t __riscv_vror (vuint8mf4_t vs2, size_t rs1, 
size_t vl); +vuint8mf2_t __riscv_vror (vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl); +vuint8mf2_t __riscv_vror (vuint8mf2_t vs2, size_t rs1, size_t vl); +vuint8m1_t __riscv_vror (vuint8m1_t vs2, vuint8m1_t vs1, size_t vl); +vuint8m1_t __riscv_vror (vuint8m1_t vs2, size_t rs1, size_t vl); +vuint8m2_t __riscv_vror (vuint8m2_t vs2, vuint8m2_t vs1, size_t vl); +vuint8m2_t __riscv_vror (vuint8m2_t vs2, size_t rs1, size_t vl); +vuint8m4_t __riscv_vror (vuint8m4_t vs2, vuint8m4_t vs1, size_t vl); +vuint8m4_t __riscv_vror (vuint8m4_t vs2, size_t rs1, size_t vl); +vuint8m8_t __riscv_vror (vuint8m8_t vs2, vuint8m8_t vs1, size_t vl); +vuint8m8_t __riscv_vror (vuint8m8_t vs2, size_t rs1, size_t vl); +vuint16mf4_t __riscv_vror (vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl); +vuint16mf4_t __riscv_vror (vuint16mf4_t vs2, size_t rs1, size_t vl); +vuint16mf2_t __riscv_vror (vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl); +vuint16mf2_t __riscv_vror (vuint16mf2_t vs2, size_t rs1, size_t vl); +vuint16m1_t __riscv_vror (vuint16m1_t vs2, vuint16m1_t vs1, size_t vl); +vuint16m1_t __riscv_vror (vuint16m1_t vs2, size_t rs1, size_t vl); +vuint16m2_t __riscv_vror (vuint16m2_t vs2, vuint16m2_t vs1, size_t vl); +vuint16m2_t __riscv_vror (vuint16m2_t vs2, size_t rs1, size_t vl); +vuint16m4_t __riscv_vror (vuint16m4_t vs2, vuint16m4_t vs1, size_t vl); +vuint16m4_t __riscv_vror (vuint16m4_t vs2, size_t rs1, size_t vl); +vuint16m8_t __riscv_vror (vuint16m8_t vs2, vuint16m8_t vs1, size_t vl); +vuint16m8_t __riscv_vror (vuint16m8_t vs2, size_t rs1, size_t vl); +vuint32mf2_t __riscv_vror (vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl); +vuint32mf2_t __riscv_vror (vuint32mf2_t vs2, size_t rs1, size_t vl); +vuint32m1_t __riscv_vror (vuint32m1_t vs2, vuint32m1_t vs1, size_t vl); +vuint32m1_t __riscv_vror (vuint32m1_t vs2, size_t rs1, size_t vl); +vuint32m2_t __riscv_vror (vuint32m2_t vs2, vuint32m2_t vs1, size_t vl); +vuint32m2_t __riscv_vror (vuint32m2_t vs2, size_t rs1, size_t vl); +vuint32m4_t __riscv_vror (vuint32m4_t vs2, vuint32m4_t vs1, size_t vl); +vuint32m4_t __riscv_vror (vuint32m4_t vs2, size_t rs1, size_t vl); +vuint32m8_t __riscv_vror (vuint32m8_t vs2, vuint32m8_t vs1, size_t vl); +vuint32m8_t __riscv_vror (vuint32m8_t vs2, size_t rs1, size_t vl); +vuint64m1_t __riscv_vror (vuint64m1_t vs2, vuint64m1_t vs1, size_t vl); +vuint64m1_t __riscv_vror (vuint64m1_t vs2, size_t rs1, size_t vl); +vuint64m2_t __riscv_vror (vuint64m2_t vs2, vuint64m2_t vs1, size_t vl); +vuint64m2_t __riscv_vror (vuint64m2_t vs2, size_t rs1, size_t vl); +vuint64m4_t __riscv_vror (vuint64m4_t vs2, vuint64m4_t vs1, size_t vl); +vuint64m4_t __riscv_vror (vuint64m4_t vs2, size_t rs1, size_t vl); +vuint64m8_t __riscv_vror (vuint64m8_t vs2, vuint64m8_t vs1, size_t vl); +vuint64m8_t __riscv_vror (vuint64m8_t vs2, size_t rs1, size_t vl); +// masked functions +vuint8mf8_t __riscv_vrol (vbool64_t vm, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl); +vuint8mf8_t __riscv_vrol (vbool64_t vm, vuint8mf8_t vs2, size_t rs1, size_t vl); +vuint8mf4_t __riscv_vrol (vbool32_t vm, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl); +vuint8mf4_t __riscv_vrol (vbool32_t vm, vuint8mf4_t vs2, size_t rs1, size_t vl); +vuint8mf2_t __riscv_vrol (vbool16_t vm, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl); +vuint8mf2_t __riscv_vrol (vbool16_t vm, vuint8mf2_t vs2, size_t rs1, size_t vl); +vuint8m1_t __riscv_vrol (vbool8_t vm, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl); +vuint8m1_t __riscv_vrol (vbool8_t vm, vuint8m1_t vs2, size_t rs1, size_t vl); +vuint8m2_t __riscv_vrol (vbool4_t 
vm, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl); +vuint8m2_t __riscv_vrol (vbool4_t vm, vuint8m2_t vs2, size_t rs1, size_t vl); +vuint8m4_t __riscv_vrol (vbool2_t vm, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl); +vuint8m4_t __riscv_vrol (vbool2_t vm, vuint8m4_t vs2, size_t rs1, size_t vl); +vuint8m8_t __riscv_vrol (vbool1_t vm, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl); +vuint8m8_t __riscv_vrol (vbool1_t vm, vuint8m8_t vs2, size_t rs1, size_t vl); +vuint16mf4_t __riscv_vrol (vbool64_t vm, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl); +vuint16mf4_t __riscv_vrol (vbool64_t vm, vuint16mf4_t vs2, size_t rs1, size_t vl); +vuint16mf2_t __riscv_vrol (vbool32_t vm, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl); +vuint16mf2_t __riscv_vrol (vbool32_t vm, vuint16mf2_t vs2, size_t rs1, size_t vl); +vuint16m1_t __riscv_vrol (vbool16_t vm, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl); +vuint16m1_t __riscv_vrol (vbool16_t vm, vuint16m1_t vs2, size_t rs1, size_t vl); +vuint16m2_t __riscv_vrol (vbool8_t vm, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl); +vuint16m2_t __riscv_vrol (vbool8_t vm, vuint16m2_t vs2, size_t rs1, size_t vl); +vuint16m4_t __riscv_vrol (vbool4_t vm, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl); +vuint16m4_t __riscv_vrol (vbool4_t vm, vuint16m4_t vs2, size_t rs1, size_t vl); +vuint16m8_t __riscv_vrol (vbool2_t vm, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl); +vuint16m8_t __riscv_vrol (vbool2_t vm, vuint16m8_t vs2, size_t rs1, size_t vl); +vuint32mf2_t __riscv_vrol (vbool64_t vm, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl); +vuint32mf2_t __riscv_vrol (vbool64_t vm, vuint32mf2_t vs2, size_t rs1, size_t vl); +vuint32m1_t __riscv_vrol (vbool32_t vm, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl); +vuint32m1_t __riscv_vrol (vbool32_t vm, vuint32m1_t vs2, size_t rs1, size_t vl); +vuint32m2_t __riscv_vrol (vbool16_t vm, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl); +vuint32m2_t __riscv_vrol (vbool16_t vm, vuint32m2_t vs2, size_t rs1, size_t vl); +vuint32m4_t __riscv_vrol (vbool8_t vm, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl); +vuint32m4_t __riscv_vrol (vbool8_t vm, vuint32m4_t vs2, size_t rs1, size_t vl); +vuint32m8_t __riscv_vrol (vbool4_t vm, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl); +vuint32m8_t __riscv_vrol (vbool4_t vm, vuint32m8_t vs2, size_t rs1, size_t vl); +vuint64m1_t __riscv_vrol (vbool64_t vm, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl); +vuint64m1_t __riscv_vrol (vbool64_t vm, vuint64m1_t vs2, size_t rs1, size_t vl); +vuint64m2_t __riscv_vrol (vbool32_t vm, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl); +vuint64m2_t __riscv_vrol (vbool32_t vm, vuint64m2_t vs2, size_t rs1, size_t vl); +vuint64m4_t __riscv_vrol (vbool16_t vm, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl); +vuint64m4_t __riscv_vrol (vbool16_t vm, vuint64m4_t vs2, size_t rs1, size_t vl); +vuint64m8_t __riscv_vrol (vbool8_t vm, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl); +vuint64m8_t __riscv_vrol (vbool8_t vm, vuint64m8_t vs2, size_t rs1, size_t vl); +vuint8mf8_t __riscv_vror (vbool64_t vm, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl); +vuint8mf8_t __riscv_vror (vbool64_t vm, vuint8mf8_t vs2, size_t rs1, size_t vl); +vuint8mf4_t __riscv_vror (vbool32_t vm, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl); +vuint8mf4_t __riscv_vror (vbool32_t vm, vuint8mf4_t vs2, size_t rs1, size_t vl); +vuint8mf2_t __riscv_vror (vbool16_t vm, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl); +vuint8mf2_t __riscv_vror (vbool16_t vm, vuint8mf2_t vs2, size_t rs1, size_t vl); +vuint8m1_t __riscv_vror (vbool8_t vm, vuint8m1_t vs2, 
vuint8m1_t vs1, size_t vl); +vuint8m1_t __riscv_vror (vbool8_t vm, vuint8m1_t vs2, size_t rs1, size_t vl); +vuint8m2_t __riscv_vror (vbool4_t vm, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl); +vuint8m2_t __riscv_vror (vbool4_t vm, vuint8m2_t vs2, size_t rs1, size_t vl); +vuint8m4_t __riscv_vror (vbool2_t vm, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl); +vuint8m4_t __riscv_vror (vbool2_t vm, vuint8m4_t vs2, size_t rs1, size_t vl); +vuint8m8_t __riscv_vror (vbool1_t vm, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl); +vuint8m8_t __riscv_vror (vbool1_t vm, vuint8m8_t vs2, size_t rs1, size_t vl); +vuint16mf4_t __riscv_vror (vbool64_t vm, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl); +vuint16mf4_t __riscv_vror (vbool64_t vm, vuint16mf4_t vs2, size_t rs1, size_t vl); +vuint16mf2_t __riscv_vror (vbool32_t vm, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl); +vuint16mf2_t __riscv_vror (vbool32_t vm, vuint16mf2_t vs2, size_t rs1, size_t vl); +vuint16m1_t __riscv_vror (vbool16_t vm, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl); +vuint16m1_t __riscv_vror (vbool16_t vm, vuint16m1_t vs2, size_t rs1, size_t vl); +vuint16m2_t __riscv_vror (vbool8_t vm, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl); +vuint16m2_t __riscv_vror (vbool8_t vm, vuint16m2_t vs2, size_t rs1, size_t vl); +vuint16m4_t __riscv_vror (vbool4_t vm, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl); +vuint16m4_t __riscv_vror (vbool4_t vm, vuint16m4_t vs2, size_t rs1, size_t vl); +vuint16m8_t __riscv_vror (vbool2_t vm, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl); +vuint16m8_t __riscv_vror (vbool2_t vm, vuint16m8_t vs2, size_t rs1, size_t vl); +vuint32mf2_t __riscv_vror (vbool64_t vm, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl); +vuint32mf2_t __riscv_vror (vbool64_t vm, vuint32mf2_t vs2, size_t rs1, size_t vl); +vuint32m1_t __riscv_vror (vbool32_t vm, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl); +vuint32m1_t __riscv_vror (vbool32_t vm, vuint32m1_t vs2, size_t rs1, size_t vl); +vuint32m2_t __riscv_vror (vbool16_t vm, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl); +vuint32m2_t __riscv_vror (vbool16_t vm, vuint32m2_t vs2, size_t rs1, size_t vl); +vuint32m4_t __riscv_vror (vbool8_t vm, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl); +vuint32m4_t __riscv_vror (vbool8_t vm, vuint32m4_t vs2, size_t rs1, size_t vl); +vuint32m8_t __riscv_vror (vbool4_t vm, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl); +vuint32m8_t __riscv_vror (vbool4_t vm, vuint32m8_t vs2, size_t rs1, size_t vl); +vuint64m1_t __riscv_vror (vbool64_t vm, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl); +vuint64m1_t __riscv_vror (vbool64_t vm, vuint64m1_t vs2, size_t rs1, size_t vl); +vuint64m2_t __riscv_vror (vbool32_t vm, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl); +vuint64m2_t __riscv_vror (vbool32_t vm, vuint64m2_t vs2, size_t rs1, size_t vl); +vuint64m4_t __riscv_vror (vbool16_t vm, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl); +vuint64m4_t __riscv_vror (vbool16_t vm, vuint64m4_t vs2, size_t rs1, size_t vl); +vuint64m8_t __riscv_vror (vbool8_t vm, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl); +vuint64m8_t __riscv_vror (vbool8_t vm, vuint64m8_t vs2, size_t rs1, size_t vl); +---- + +[[overloaded-]] +==== Vector Basic Bit-manipulation used - Widening Shift + +[,c] +---- +vuint16mf4_t __riscv_vwsll (vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl); +vuint16mf4_t __riscv_vwsll (vuint8mf8_t vs2, size_t rs1, size_t vl); +vuint16mf2_t __riscv_vwsll (vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl); +vuint16mf2_t __riscv_vwsll (vuint8mf4_t vs2, size_t rs1, size_t vl); +vuint16m1_t __riscv_vwsll 
(vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl); +vuint16m1_t __riscv_vwsll (vuint8mf2_t vs2, size_t rs1, size_t vl); +vuint16m2_t __riscv_vwsll (vuint8m1_t vs2, vuint8m1_t vs1, size_t vl); +vuint16m2_t __riscv_vwsll (vuint8m1_t vs2, size_t rs1, size_t vl); +vuint16m4_t __riscv_vwsll (vuint8m2_t vs2, vuint8m2_t vs1, size_t vl); +vuint16m4_t __riscv_vwsll (vuint8m2_t vs2, size_t rs1, size_t vl); +vuint16m8_t __riscv_vwsll (vuint8m4_t vs2, vuint8m4_t vs1, size_t vl); +vuint16m8_t __riscv_vwsll (vuint8m4_t vs2, size_t rs1, size_t vl); +vuint32mf2_t __riscv_vwsll (vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl); +vuint32mf2_t __riscv_vwsll (vuint16mf4_t vs2, size_t rs1, size_t vl); +vuint32m1_t __riscv_vwsll (vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl); +vuint32m1_t __riscv_vwsll (vuint16mf2_t vs2, size_t rs1, size_t vl); +vuint32m2_t __riscv_vwsll (vuint16m1_t vs2, vuint16m1_t vs1, size_t vl); +vuint32m2_t __riscv_vwsll (vuint16m1_t vs2, size_t rs1, size_t vl); +vuint32m4_t __riscv_vwsll (vuint16m2_t vs2, vuint16m2_t vs1, size_t vl); +vuint32m4_t __riscv_vwsll (vuint16m2_t vs2, size_t rs1, size_t vl); +vuint32m8_t __riscv_vwsll (vuint16m4_t vs2, vuint16m4_t vs1, size_t vl); +vuint32m8_t __riscv_vwsll (vuint16m4_t vs2, size_t rs1, size_t vl); +vuint64m1_t __riscv_vwsll (vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl); +vuint64m1_t __riscv_vwsll (vuint32mf2_t vs2, size_t rs1, size_t vl); +vuint64m2_t __riscv_vwsll (vuint32m1_t vs2, vuint32m1_t vs1, size_t vl); +vuint64m2_t __riscv_vwsll (vuint32m1_t vs2, size_t rs1, size_t vl); +vuint64m4_t __riscv_vwsll (vuint32m2_t vs2, vuint32m2_t vs1, size_t vl); +vuint64m4_t __riscv_vwsll (vuint32m2_t vs2, size_t rs1, size_t vl); +vuint64m8_t __riscv_vwsll (vuint32m4_t vs2, vuint32m4_t vs1, size_t vl); +vuint64m8_t __riscv_vwsll (vuint32m4_t vs2, size_t rs1, size_t vl); +// masked functions +vuint16mf4_t __riscv_vwsll (vbool64_t vm, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl); +vuint16mf4_t __riscv_vwsll (vbool64_t vm, vuint8mf8_t vs2, size_t rs1, size_t vl); +vuint16mf2_t __riscv_vwsll (vbool32_t vm, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl); +vuint16mf2_t __riscv_vwsll (vbool32_t vm, vuint8mf4_t vs2, size_t rs1, size_t vl); +vuint16m1_t __riscv_vwsll (vbool16_t vm, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl); +vuint16m1_t __riscv_vwsll (vbool16_t vm, vuint8mf2_t vs2, size_t rs1, size_t vl); +vuint16m2_t __riscv_vwsll (vbool8_t vm, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl); +vuint16m2_t __riscv_vwsll (vbool8_t vm, vuint8m1_t vs2, size_t rs1, size_t vl); +vuint16m4_t __riscv_vwsll (vbool4_t vm, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl); +vuint16m4_t __riscv_vwsll (vbool4_t vm, vuint8m2_t vs2, size_t rs1, size_t vl); +vuint16m8_t __riscv_vwsll (vbool2_t vm, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl); +vuint16m8_t __riscv_vwsll (vbool2_t vm, vuint8m4_t vs2, size_t rs1, size_t vl); +vuint32mf2_t __riscv_vwsll (vbool64_t vm, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl); +vuint32mf2_t __riscv_vwsll (vbool64_t vm, vuint16mf4_t vs2, size_t rs1, size_t vl); +vuint32m1_t __riscv_vwsll (vbool32_t vm, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl); +vuint32m1_t __riscv_vwsll (vbool32_t vm, vuint16mf2_t vs2, size_t rs1, size_t vl); +vuint32m2_t __riscv_vwsll (vbool16_t vm, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl); +vuint32m2_t __riscv_vwsll (vbool16_t vm, vuint16m1_t vs2, size_t rs1, size_t vl); +vuint32m4_t __riscv_vwsll (vbool8_t vm, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl); +vuint32m4_t __riscv_vwsll (vbool8_t vm, vuint16m2_t vs2, size_t rs1, size_t 
vl); +vuint32m8_t __riscv_vwsll (vbool4_t vm, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl); +vuint32m8_t __riscv_vwsll (vbool4_t vm, vuint16m4_t vs2, size_t rs1, size_t vl); +vuint64m1_t __riscv_vwsll (vbool64_t vm, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl); +vuint64m1_t __riscv_vwsll (vbool64_t vm, vuint32mf2_t vs2, size_t rs1, size_t vl); +vuint64m2_t __riscv_vwsll (vbool32_t vm, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl); +vuint64m2_t __riscv_vwsll (vbool32_t vm, vuint32m1_t vs2, size_t rs1, size_t vl); +vuint64m4_t __riscv_vwsll (vbool16_t vm, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl); +vuint64m4_t __riscv_vwsll (vbool16_t vm, vuint32m2_t vs2, size_t rs1, size_t vl); +vuint64m8_t __riscv_vwsll (vbool8_t vm, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl); +vuint64m8_t __riscv_vwsll (vbool8_t vm, vuint32m4_t vs2, size_t rs1, size_t vl); +---- + +=== Zvbc - Vector Carryless Multiplication + +[[overloaded-]] +==== Vector Carryless Multiplication + +[,c] +---- +vuint64m1_t __riscv_vclmul (vuint64m1_t vs2, vuint64m1_t vs1, size_t vl); +vuint64m1_t __riscv_vclmul (vuint64m1_t vs2, uint64_t rs1, size_t vl); +vuint64m2_t __riscv_vclmul (vuint64m2_t vs2, vuint64m2_t vs1, size_t vl); +vuint64m2_t __riscv_vclmul (vuint64m2_t vs2, uint64_t rs1, size_t vl); +vuint64m4_t __riscv_vclmul (vuint64m4_t vs2, vuint64m4_t vs1, size_t vl); +vuint64m4_t __riscv_vclmul (vuint64m4_t vs2, uint64_t rs1, size_t vl); +vuint64m8_t __riscv_vclmul (vuint64m8_t vs2, vuint64m8_t vs1, size_t vl); +vuint64m8_t __riscv_vclmul (vuint64m8_t vs2, uint64_t rs1, size_t vl); +vuint64m1_t __riscv_vclmulh (vuint64m1_t vs2, vuint64m1_t vs1, size_t vl); +vuint64m1_t __riscv_vclmulh (vuint64m1_t vs2, uint64_t rs1, size_t vl); +vuint64m2_t __riscv_vclmulh (vuint64m2_t vs2, vuint64m2_t vs1, size_t vl); +vuint64m2_t __riscv_vclmulh (vuint64m2_t vs2, uint64_t rs1, size_t vl); +vuint64m4_t __riscv_vclmulh (vuint64m4_t vs2, vuint64m4_t vs1, size_t vl); +vuint64m4_t __riscv_vclmulh (vuint64m4_t vs2, uint64_t rs1, size_t vl); +vuint64m8_t __riscv_vclmulh (vuint64m8_t vs2, vuint64m8_t vs1, size_t vl); +vuint64m8_t __riscv_vclmulh (vuint64m8_t vs2, uint64_t rs1, size_t vl); +// masked functions +vuint64m1_t __riscv_vclmul (vbool64_t vm, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl); +vuint64m1_t __riscv_vclmul (vbool64_t vm, vuint64m1_t vs2, uint64_t rs1, size_t vl); +vuint64m2_t __riscv_vclmul (vbool32_t vm, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl); +vuint64m2_t __riscv_vclmul (vbool32_t vm, vuint64m2_t vs2, uint64_t rs1, size_t vl); +vuint64m4_t __riscv_vclmul (vbool16_t vm, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl); +vuint64m4_t __riscv_vclmul (vbool16_t vm, vuint64m4_t vs2, uint64_t rs1, size_t vl); +vuint64m8_t __riscv_vclmul (vbool8_t vm, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl); +vuint64m8_t __riscv_vclmul (vbool8_t vm, vuint64m8_t vs2, uint64_t rs1, size_t vl); +vuint64m1_t __riscv_vclmulh (vbool64_t vm, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl); +vuint64m1_t __riscv_vclmulh (vbool64_t vm, vuint64m1_t vs2, uint64_t rs1, size_t vl); +vuint64m2_t __riscv_vclmulh (vbool32_t vm, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl); +vuint64m2_t __riscv_vclmulh (vbool32_t vm, vuint64m2_t vs2, uint64_t rs1, size_t vl); +vuint64m4_t __riscv_vclmulh (vbool16_t vm, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl); +vuint64m4_t __riscv_vclmulh (vbool16_t vm, vuint64m4_t vs2, uint64_t rs1, size_t vl); +vuint64m8_t __riscv_vclmulh (vbool8_t vm, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl); +vuint64m8_t __riscv_vclmulh (vbool8_t vm, 
vuint64m8_t vs2, uint64_t rs1, size_t vl); +---- + +=== Zvkg - Vector GCM/GMAC + +[[overloaded-]] +==== Vector GCM/GMAC + +[,c] +---- +vuint32mf2_t __riscv_vghsh (vuint32mf2_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl); +vuint32m1_t __riscv_vghsh (vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl); +vuint32m2_t __riscv_vghsh (vuint32m2_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl); +vuint32m4_t __riscv_vghsh (vuint32m4_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl); +vuint32m8_t __riscv_vghsh (vuint32m8_t vd, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl); +vuint32mf2_t __riscv_vgmul (vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vgmul (vuint32m1_t vd, vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vgmul (vuint32m2_t vd, vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vgmul (vuint32m4_t vd, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vgmul (vuint32m8_t vd, vuint32m8_t vs2, size_t vl); +---- + +=== Zvkned - NIST Suite: Vector AES Block Cipher + +[[overloaded-]] +==== Vector AES Encryption + +[,c] +---- +vuint32mf2_t __riscv_vaesef_vv (vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); +vuint32mf2_t __riscv_vaesef_vs (vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vaesef_vs (vuint32m1_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m2_t __riscv_vaesef_vs (vuint32m2_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m4_t __riscv_vaesef_vs (vuint32m4_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m8_t __riscv_vaesef_vs (vuint32m8_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vaesef_vv (vuint32m1_t vd, vuint32m1_t vs2, size_t vl); +vuint32m1_t __riscv_vaesef_vs (vuint32m1_t vd, vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vaesef_vs (vuint32m2_t vd, vuint32m1_t vs2, size_t vl); +vuint32m4_t __riscv_vaesef_vs (vuint32m4_t vd, vuint32m1_t vs2, size_t vl); +vuint32m8_t __riscv_vaesef_vs (vuint32m8_t vd, vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vaesef_vv (vuint32m2_t vd, vuint32m2_t vs2, size_t vl); +vuint32m2_t __riscv_vaesef_vs (vuint32m2_t vd, vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vaesef_vs (vuint32m4_t vd, vuint32m2_t vs2, size_t vl); +vuint32m8_t __riscv_vaesef_vs (vuint32m8_t vd, vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vaesef_vv (vuint32m4_t vd, vuint32m4_t vs2, size_t vl); +vuint32m4_t __riscv_vaesef_vs (vuint32m4_t vd, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vaesef_vs (vuint32m8_t vd, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vaesef_vv (vuint32m8_t vd, vuint32m8_t vs2, size_t vl); +vuint32mf2_t __riscv_vaesem_vv (vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); +vuint32mf2_t __riscv_vaesem_vs (vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vaesem_vs (vuint32m1_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m2_t __riscv_vaesem_vs (vuint32m2_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m4_t __riscv_vaesem_vs (vuint32m4_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m8_t __riscv_vaesem_vs (vuint32m8_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vaesem_vv (vuint32m1_t vd, vuint32m1_t vs2, size_t vl); +vuint32m1_t __riscv_vaesem_vs (vuint32m1_t vd, vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vaesem_vs (vuint32m2_t vd, vuint32m1_t vs2, size_t vl); +vuint32m4_t __riscv_vaesem_vs (vuint32m4_t vd, vuint32m1_t vs2, size_t vl); +vuint32m8_t __riscv_vaesem_vs (vuint32m8_t vd, vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vaesem_vv (vuint32m2_t vd, vuint32m2_t vs2, size_t vl); +vuint32m2_t __riscv_vaesem_vs (vuint32m2_t vd, vuint32m2_t vs2, size_t 
vl); +vuint32m4_t __riscv_vaesem_vs (vuint32m4_t vd, vuint32m2_t vs2, size_t vl); +vuint32m8_t __riscv_vaesem_vs (vuint32m8_t vd, vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vaesem_vv (vuint32m4_t vd, vuint32m4_t vs2, size_t vl); +vuint32m4_t __riscv_vaesem_vs (vuint32m4_t vd, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vaesem_vs (vuint32m8_t vd, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vaesem_vv (vuint32m8_t vd, vuint32m8_t vs2, size_t vl); +---- + +[[overloaded-]] +==== Vector AES Decryption + +[,c] +---- +vuint32mf2_t __riscv_vaesdf_vv (vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); +vuint32mf2_t __riscv_vaesdf_vs (vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vaesdf_vs (vuint32m1_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m2_t __riscv_vaesdf_vs (vuint32m2_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m4_t __riscv_vaesdf_vs (vuint32m4_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m8_t __riscv_vaesdf_vs (vuint32m8_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vaesdf_vv (vuint32m1_t vd, vuint32m1_t vs2, size_t vl); +vuint32m1_t __riscv_vaesdf_vs (vuint32m1_t vd, vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vaesdf_vs (vuint32m2_t vd, vuint32m1_t vs2, size_t vl); +vuint32m4_t __riscv_vaesdf_vs (vuint32m4_t vd, vuint32m1_t vs2, size_t vl); +vuint32m8_t __riscv_vaesdf_vs (vuint32m8_t vd, vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vaesdf_vv (vuint32m2_t vd, vuint32m2_t vs2, size_t vl); +vuint32m2_t __riscv_vaesdf_vs (vuint32m2_t vd, vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vaesdf_vs (vuint32m4_t vd, vuint32m2_t vs2, size_t vl); +vuint32m8_t __riscv_vaesdf_vs (vuint32m8_t vd, vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vaesdf_vv (vuint32m4_t vd, vuint32m4_t vs2, size_t vl); +vuint32m4_t __riscv_vaesdf_vs (vuint32m4_t vd, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vaesdf_vs (vuint32m8_t vd, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vaesdf_vv (vuint32m8_t vd, vuint32m8_t vs2, size_t vl); +vuint32mf2_t __riscv_vaesdm_vv (vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); +vuint32mf2_t __riscv_vaesdm_vs (vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vaesdm_vs (vuint32m1_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m2_t __riscv_vaesdm_vs (vuint32m2_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m4_t __riscv_vaesdm_vs (vuint32m4_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m8_t __riscv_vaesdm_vs (vuint32m8_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vaesdm_vv (vuint32m1_t vd, vuint32m1_t vs2, size_t vl); +vuint32m1_t __riscv_vaesdm_vs (vuint32m1_t vd, vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vaesdm_vs (vuint32m2_t vd, vuint32m1_t vs2, size_t vl); +vuint32m4_t __riscv_vaesdm_vs (vuint32m4_t vd, vuint32m1_t vs2, size_t vl); +vuint32m8_t __riscv_vaesdm_vs (vuint32m8_t vd, vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vaesdm_vv (vuint32m2_t vd, vuint32m2_t vs2, size_t vl); +vuint32m2_t __riscv_vaesdm_vs (vuint32m2_t vd, vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vaesdm_vs (vuint32m4_t vd, vuint32m2_t vs2, size_t vl); +vuint32m8_t __riscv_vaesdm_vs (vuint32m8_t vd, vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vaesdm_vv (vuint32m4_t vd, vuint32m4_t vs2, size_t vl); +vuint32m4_t __riscv_vaesdm_vs (vuint32m4_t vd, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vaesdm_vs (vuint32m8_t vd, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vaesdm_vv (vuint32m8_t vd, vuint32m8_t vs2, size_t vl); +---- + +[[overloaded-]] +==== Vector AES-128 Forward KeySchedule 
generation + +[,c] +---- +vuint32mf2_t __riscv_vaeskf1 (vuint32mf2_t vs2, size_t uimm, size_t vl); +vuint32m1_t __riscv_vaeskf1 (vuint32m1_t vs2, size_t uimm, size_t vl); +vuint32m2_t __riscv_vaeskf1 (vuint32m2_t vs2, size_t uimm, size_t vl); +vuint32m4_t __riscv_vaeskf1 (vuint32m4_t vs2, size_t uimm, size_t vl); +vuint32m8_t __riscv_vaeskf1 (vuint32m8_t vs2, size_t uimm, size_t vl); +vuint32mf2_t __riscv_vaeskf2 (vuint32mf2_t vd, vuint32mf2_t vs2, size_t uimm, size_t vl); +vuint32m1_t __riscv_vaeskf2 (vuint32m1_t vd, vuint32m1_t vs2, size_t uimm, size_t vl); +vuint32m2_t __riscv_vaeskf2 (vuint32m2_t vd, vuint32m2_t vs2, size_t uimm, size_t vl); +vuint32m4_t __riscv_vaeskf2 (vuint32m4_t vd, vuint32m4_t vs2, size_t uimm, size_t vl); +vuint32m8_t __riscv_vaeskf2 (vuint32m8_t vd, vuint32m8_t vs2, size_t uimm, size_t vl); +---- + +[[overloaded-]] +==== Vector AES round zero + +[,c] +---- +vuint32mf2_t __riscv_vaesz (vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vaesz (vuint32m1_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m2_t __riscv_vaesz (vuint32m2_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m4_t __riscv_vaesz (vuint32m4_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m8_t __riscv_vaesz (vuint32m8_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vaesz (vuint32m1_t vd, vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vaesz (vuint32m2_t vd, vuint32m1_t vs2, size_t vl); +vuint32m4_t __riscv_vaesz (vuint32m4_t vd, vuint32m1_t vs2, size_t vl); +vuint32m8_t __riscv_vaesz (vuint32m8_t vd, vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vaesz (vuint32m2_t vd, vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vaesz (vuint32m4_t vd, vuint32m2_t vs2, size_t vl); +vuint32m8_t __riscv_vaesz (vuint32m8_t vd, vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vaesz (vuint32m4_t vd, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vaesz (vuint32m8_t vd, vuint32m4_t vs2, size_t vl); +---- + +=== Zvknh - NIST Suite: Vector SHA-2 Secure Hash + +[[overloaded-]] +==== Vector SHA-2 message schedule + +[,c] +---- +vuint32mf2_t __riscv_vsha2ms (vuint32mf2_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl); +vuint32m1_t __riscv_vsha2ms (vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl); +vuint32m2_t __riscv_vsha2ms (vuint32m2_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl); +vuint32m4_t __riscv_vsha2ms (vuint32m4_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl); +vuint32m8_t __riscv_vsha2ms (vuint32m8_t vd, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl); +vuint64m1_t __riscv_vsha2ms (vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl); +vuint64m2_t __riscv_vsha2ms (vuint64m2_t vd, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl); +vuint64m4_t __riscv_vsha2ms (vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl); +vuint64m8_t __riscv_vsha2ms (vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl); +---- + +[[overloaded-]] +==== Vector SHA-2 two rounds of compression + +[,c] +---- +vuint32mf2_t __riscv_vsha2ch (vuint32mf2_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl); +vuint32m1_t __riscv_vsha2ch (vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl); +vuint32m2_t __riscv_vsha2ch (vuint32m2_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl); +vuint32m4_t __riscv_vsha2ch (vuint32m4_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl); +vuint32m8_t __riscv_vsha2ch (vuint32m8_t vd, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl); +vuint64m1_t __riscv_vsha2ch (vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl); +vuint64m2_t __riscv_vsha2ch 
(vuint64m2_t vd, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl); +vuint64m4_t __riscv_vsha2ch (vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl); +vuint64m8_t __riscv_vsha2ch (vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl); +vuint32mf2_t __riscv_vsha2cl (vuint32mf2_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl); +vuint32m1_t __riscv_vsha2cl (vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl); +vuint32m2_t __riscv_vsha2cl (vuint32m2_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl); +vuint32m4_t __riscv_vsha2cl (vuint32m4_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl); +vuint32m8_t __riscv_vsha2cl (vuint32m8_t vd, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl); +vuint64m1_t __riscv_vsha2cl (vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl); +vuint64m2_t __riscv_vsha2cl (vuint64m2_t vd, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl); +vuint64m4_t __riscv_vsha2cl (vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl); +vuint64m8_t __riscv_vsha2cl (vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl); +---- + +=== Zvksed - ShangMi Suite: SM4 Block Cipher + +[[overloaded-]] +==== Vector SM4 KeyExpansion + +[,c] +---- +vuint32mf2_t __riscv_vsm4k (vuint32mf2_t vs2, size_t uimm, size_t vl); +vuint32m1_t __riscv_vsm4k (vuint32m1_t vs2, size_t uimm, size_t vl); +vuint32m2_t __riscv_vsm4k (vuint32m2_t vs2, size_t uimm, size_t vl); +vuint32m4_t __riscv_vsm4k (vuint32m4_t vs2, size_t uimm, size_t vl); +vuint32m8_t __riscv_vsm4k (vuint32m8_t vs2, size_t uimm, size_t vl); +---- + +[[overloaded-]] +==== Vector SM4 Rounds + +[,c] +---- +vuint32mf2_t __riscv_vsm4r_vv (vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); +vuint32mf2_t __riscv_vsm4r_vs (vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vsm4r_vs (vuint32m1_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m2_t __riscv_vsm4r_vs (vuint32m2_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m4_t __riscv_vsm4r_vs (vuint32m4_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m8_t __riscv_vsm4r_vs (vuint32m8_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vsm4r_vv (vuint32m1_t vd, vuint32m1_t vs2, size_t vl); +vuint32m1_t __riscv_vsm4r_vs (vuint32m1_t vd, vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vsm4r_vs (vuint32m2_t vd, vuint32m1_t vs2, size_t vl); +vuint32m4_t __riscv_vsm4r_vs (vuint32m4_t vd, vuint32m1_t vs2, size_t vl); +vuint32m8_t __riscv_vsm4r_vs (vuint32m8_t vd, vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vsm4r_vv (vuint32m2_t vd, vuint32m2_t vs2, size_t vl); +vuint32m2_t __riscv_vsm4r_vs (vuint32m2_t vd, vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vsm4r_vs (vuint32m4_t vd, vuint32m2_t vs2, size_t vl); +vuint32m8_t __riscv_vsm4r_vs (vuint32m8_t vd, vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vsm4r_vv (vuint32m4_t vd, vuint32m4_t vs2, size_t vl); +vuint32m4_t __riscv_vsm4r_vs (vuint32m4_t vd, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vsm4r_vs (vuint32m8_t vd, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vsm4r_vv (vuint32m8_t vd, vuint32m8_t vs2, size_t vl); +---- + +=== Zvksh - ShangMi Suite: SM3 Secure Hash + +[[overloaded-]] +==== Vector SM3 Message Expansion + +[,c] +---- +vuint32mf2_t __riscv_vsm3me (vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl); +vuint32m1_t __riscv_vsm3me (vuint32m1_t vs2, vuint32m1_t vs1, size_t vl); +vuint32m2_t __riscv_vsm3me (vuint32m2_t vs2, vuint32m2_t vs1, size_t vl); +vuint32m4_t __riscv_vsm3me (vuint32m4_t vs2, vuint32m4_t vs1, size_t vl); +vuint32m8_t __riscv_vsm3me (vuint32m8_t vs2, vuint32m8_t vs1, 
size_t vl); +---- + +[[overloaded-]] +==== Vector SM3 Compression + +[,c] +---- +vuint32mf2_t __riscv_vsm3c (vuint32mf2_t vd, vuint32mf2_t vs2, size_t uimm, size_t vl); +vuint32m1_t __riscv_vsm3c (vuint32m1_t vd, vuint32m1_t vs2, size_t uimm, size_t vl); +vuint32m2_t __riscv_vsm3c (vuint32m2_t vd, vuint32m2_t vs2, size_t uimm, size_t vl); +vuint32m4_t __riscv_vsm3c (vuint32m4_t vd, vuint32m4_t vs2, size_t uimm, size_t vl); +vuint32m8_t __riscv_vsm3c (vuint32m8_t vd, vuint32m8_t vs2, size_t uimm, size_t vl); +---- diff --git a/auto-generated/vector-crypto/overloaded_intrinsic_funcs/00_zvbb_-_vector_bit-manipulation_used_in_cryptography.adoc b/auto-generated/vector-crypto/overloaded_intrinsic_funcs/00_zvbb_-_vector_bit-manipulation_used_in_cryptography.adoc new file mode 100644 index 000000000..c32b967ed --- /dev/null +++ b/auto-generated/vector-crypto/overloaded_intrinsic_funcs/00_zvbb_-_vector_bit-manipulation_used_in_cryptography.adoc @@ -0,0 +1,638 @@ + +=== Zvbb - Vector Bit-manipulation used in Cryptography + +[[overloaded-]] +==== Vector Bit-manipulation used in Cryptography - Bitwise And-Not + +[,c] +---- +vuint8mf8_t __riscv_vandn (vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl); +vuint8mf8_t __riscv_vandn (vuint8mf8_t vs2, uint8_t rs1, size_t vl); +vuint8mf4_t __riscv_vandn (vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl); +vuint8mf4_t __riscv_vandn (vuint8mf4_t vs2, uint8_t rs1, size_t vl); +vuint8mf2_t __riscv_vandn (vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl); +vuint8mf2_t __riscv_vandn (vuint8mf2_t vs2, uint8_t rs1, size_t vl); +vuint8m1_t __riscv_vandn (vuint8m1_t vs2, vuint8m1_t vs1, size_t vl); +vuint8m1_t __riscv_vandn (vuint8m1_t vs2, uint8_t rs1, size_t vl); +vuint8m2_t __riscv_vandn (vuint8m2_t vs2, vuint8m2_t vs1, size_t vl); +vuint8m2_t __riscv_vandn (vuint8m2_t vs2, uint8_t rs1, size_t vl); +vuint8m4_t __riscv_vandn (vuint8m4_t vs2, vuint8m4_t vs1, size_t vl); +vuint8m4_t __riscv_vandn (vuint8m4_t vs2, uint8_t rs1, size_t vl); +vuint8m8_t __riscv_vandn (vuint8m8_t vs2, vuint8m8_t vs1, size_t vl); +vuint8m8_t __riscv_vandn (vuint8m8_t vs2, uint8_t rs1, size_t vl); +vuint16mf4_t __riscv_vandn (vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl); +vuint16mf4_t __riscv_vandn (vuint16mf4_t vs2, uint16_t rs1, size_t vl); +vuint16mf2_t __riscv_vandn (vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl); +vuint16mf2_t __riscv_vandn (vuint16mf2_t vs2, uint16_t rs1, size_t vl); +vuint16m1_t __riscv_vandn (vuint16m1_t vs2, vuint16m1_t vs1, size_t vl); +vuint16m1_t __riscv_vandn (vuint16m1_t vs2, uint16_t rs1, size_t vl); +vuint16m2_t __riscv_vandn (vuint16m2_t vs2, vuint16m2_t vs1, size_t vl); +vuint16m2_t __riscv_vandn (vuint16m2_t vs2, uint16_t rs1, size_t vl); +vuint16m4_t __riscv_vandn (vuint16m4_t vs2, vuint16m4_t vs1, size_t vl); +vuint16m4_t __riscv_vandn (vuint16m4_t vs2, uint16_t rs1, size_t vl); +vuint16m8_t __riscv_vandn (vuint16m8_t vs2, vuint16m8_t vs1, size_t vl); +vuint16m8_t __riscv_vandn (vuint16m8_t vs2, uint16_t rs1, size_t vl); +vuint32mf2_t __riscv_vandn (vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl); +vuint32mf2_t __riscv_vandn (vuint32mf2_t vs2, uint32_t rs1, size_t vl); +vuint32m1_t __riscv_vandn (vuint32m1_t vs2, vuint32m1_t vs1, size_t vl); +vuint32m1_t __riscv_vandn (vuint32m1_t vs2, uint32_t rs1, size_t vl); +vuint32m2_t __riscv_vandn (vuint32m2_t vs2, vuint32m2_t vs1, size_t vl); +vuint32m2_t __riscv_vandn (vuint32m2_t vs2, uint32_t rs1, size_t vl); +vuint32m4_t __riscv_vandn (vuint32m4_t vs2, vuint32m4_t vs1, size_t vl); +vuint32m4_t __riscv_vandn (vuint32m4_t 
vs2, uint32_t rs1, size_t vl); +vuint32m8_t __riscv_vandn (vuint32m8_t vs2, vuint32m8_t vs1, size_t vl); +vuint32m8_t __riscv_vandn (vuint32m8_t vs2, uint32_t rs1, size_t vl); +vuint64m1_t __riscv_vandn (vuint64m1_t vs2, vuint64m1_t vs1, size_t vl); +vuint64m1_t __riscv_vandn (vuint64m1_t vs2, uint64_t rs1, size_t vl); +vuint64m2_t __riscv_vandn (vuint64m2_t vs2, vuint64m2_t vs1, size_t vl); +vuint64m2_t __riscv_vandn (vuint64m2_t vs2, uint64_t rs1, size_t vl); +vuint64m4_t __riscv_vandn (vuint64m4_t vs2, vuint64m4_t vs1, size_t vl); +vuint64m4_t __riscv_vandn (vuint64m4_t vs2, uint64_t rs1, size_t vl); +vuint64m8_t __riscv_vandn (vuint64m8_t vs2, vuint64m8_t vs1, size_t vl); +vuint64m8_t __riscv_vandn (vuint64m8_t vs2, uint64_t rs1, size_t vl); +// masked functions +vuint8mf8_t __riscv_vandn (vbool64_t vm, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl); +vuint8mf8_t __riscv_vandn (vbool64_t vm, vuint8mf8_t vs2, uint8_t rs1, size_t vl); +vuint8mf4_t __riscv_vandn (vbool32_t vm, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl); +vuint8mf4_t __riscv_vandn (vbool32_t vm, vuint8mf4_t vs2, uint8_t rs1, size_t vl); +vuint8mf2_t __riscv_vandn (vbool16_t vm, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl); +vuint8mf2_t __riscv_vandn (vbool16_t vm, vuint8mf2_t vs2, uint8_t rs1, size_t vl); +vuint8m1_t __riscv_vandn (vbool8_t vm, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl); +vuint8m1_t __riscv_vandn (vbool8_t vm, vuint8m1_t vs2, uint8_t rs1, size_t vl); +vuint8m2_t __riscv_vandn (vbool4_t vm, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl); +vuint8m2_t __riscv_vandn (vbool4_t vm, vuint8m2_t vs2, uint8_t rs1, size_t vl); +vuint8m4_t __riscv_vandn (vbool2_t vm, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl); +vuint8m4_t __riscv_vandn (vbool2_t vm, vuint8m4_t vs2, uint8_t rs1, size_t vl); +vuint8m8_t __riscv_vandn (vbool1_t vm, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl); +vuint8m8_t __riscv_vandn (vbool1_t vm, vuint8m8_t vs2, uint8_t rs1, size_t vl); +vuint16mf4_t __riscv_vandn (vbool64_t vm, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl); +vuint16mf4_t __riscv_vandn (vbool64_t vm, vuint16mf4_t vs2, uint16_t rs1, size_t vl); +vuint16mf2_t __riscv_vandn (vbool32_t vm, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl); +vuint16mf2_t __riscv_vandn (vbool32_t vm, vuint16mf2_t vs2, uint16_t rs1, size_t vl); +vuint16m1_t __riscv_vandn (vbool16_t vm, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl); +vuint16m1_t __riscv_vandn (vbool16_t vm, vuint16m1_t vs2, uint16_t rs1, size_t vl); +vuint16m2_t __riscv_vandn (vbool8_t vm, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl); +vuint16m2_t __riscv_vandn (vbool8_t vm, vuint16m2_t vs2, uint16_t rs1, size_t vl); +vuint16m4_t __riscv_vandn (vbool4_t vm, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl); +vuint16m4_t __riscv_vandn (vbool4_t vm, vuint16m4_t vs2, uint16_t rs1, size_t vl); +vuint16m8_t __riscv_vandn (vbool2_t vm, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl); +vuint16m8_t __riscv_vandn (vbool2_t vm, vuint16m8_t vs2, uint16_t rs1, size_t vl); +vuint32mf2_t __riscv_vandn (vbool64_t vm, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl); +vuint32mf2_t __riscv_vandn (vbool64_t vm, vuint32mf2_t vs2, uint32_t rs1, size_t vl); +vuint32m1_t __riscv_vandn (vbool32_t vm, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl); +vuint32m1_t __riscv_vandn (vbool32_t vm, vuint32m1_t vs2, uint32_t rs1, size_t vl); +vuint32m2_t __riscv_vandn (vbool16_t vm, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl); +vuint32m2_t __riscv_vandn (vbool16_t vm, vuint32m2_t vs2, uint32_t rs1, size_t vl); +vuint32m4_t __riscv_vandn 
(vbool8_t vm, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl); +vuint32m4_t __riscv_vandn (vbool8_t vm, vuint32m4_t vs2, uint32_t rs1, size_t vl); +vuint32m8_t __riscv_vandn (vbool4_t vm, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl); +vuint32m8_t __riscv_vandn (vbool4_t vm, vuint32m8_t vs2, uint32_t rs1, size_t vl); +vuint64m1_t __riscv_vandn (vbool64_t vm, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl); +vuint64m1_t __riscv_vandn (vbool64_t vm, vuint64m1_t vs2, uint64_t rs1, size_t vl); +vuint64m2_t __riscv_vandn (vbool32_t vm, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl); +vuint64m2_t __riscv_vandn (vbool32_t vm, vuint64m2_t vs2, uint64_t rs1, size_t vl); +vuint64m4_t __riscv_vandn (vbool16_t vm, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl); +vuint64m4_t __riscv_vandn (vbool16_t vm, vuint64m4_t vs2, uint64_t rs1, size_t vl); +vuint64m8_t __riscv_vandn (vbool8_t vm, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl); +vuint64m8_t __riscv_vandn (vbool8_t vm, vuint64m8_t vs2, uint64_t rs1, size_t vl); +---- + +[[overloaded-]] +==== Vector Basic Bit-manipulation - Reverse + +[,c] +---- +vuint8mf8_t __riscv_vbrev (vuint8mf8_t vs2, size_t vl); +vuint8mf4_t __riscv_vbrev (vuint8mf4_t vs2, size_t vl); +vuint8mf2_t __riscv_vbrev (vuint8mf2_t vs2, size_t vl); +vuint8m1_t __riscv_vbrev (vuint8m1_t vs2, size_t vl); +vuint8m2_t __riscv_vbrev (vuint8m2_t vs2, size_t vl); +vuint8m4_t __riscv_vbrev (vuint8m4_t vs2, size_t vl); +vuint8m8_t __riscv_vbrev (vuint8m8_t vs2, size_t vl); +vuint16mf4_t __riscv_vbrev (vuint16mf4_t vs2, size_t vl); +vuint16mf2_t __riscv_vbrev (vuint16mf2_t vs2, size_t vl); +vuint16m1_t __riscv_vbrev (vuint16m1_t vs2, size_t vl); +vuint16m2_t __riscv_vbrev (vuint16m2_t vs2, size_t vl); +vuint16m4_t __riscv_vbrev (vuint16m4_t vs2, size_t vl); +vuint16m8_t __riscv_vbrev (vuint16m8_t vs2, size_t vl); +vuint32mf2_t __riscv_vbrev (vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vbrev (vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vbrev (vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vbrev (vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vbrev (vuint32m8_t vs2, size_t vl); +vuint64m1_t __riscv_vbrev (vuint64m1_t vs2, size_t vl); +vuint64m2_t __riscv_vbrev (vuint64m2_t vs2, size_t vl); +vuint64m4_t __riscv_vbrev (vuint64m4_t vs2, size_t vl); +vuint64m8_t __riscv_vbrev (vuint64m8_t vs2, size_t vl); +vuint8mf8_t __riscv_vbrev8 (vuint8mf8_t vs2, size_t vl); +vuint8mf4_t __riscv_vbrev8 (vuint8mf4_t vs2, size_t vl); +vuint8mf2_t __riscv_vbrev8 (vuint8mf2_t vs2, size_t vl); +vuint8m1_t __riscv_vbrev8 (vuint8m1_t vs2, size_t vl); +vuint8m2_t __riscv_vbrev8 (vuint8m2_t vs2, size_t vl); +vuint8m4_t __riscv_vbrev8 (vuint8m4_t vs2, size_t vl); +vuint8m8_t __riscv_vbrev8 (vuint8m8_t vs2, size_t vl); +vuint16mf4_t __riscv_vbrev8 (vuint16mf4_t vs2, size_t vl); +vuint16mf2_t __riscv_vbrev8 (vuint16mf2_t vs2, size_t vl); +vuint16m1_t __riscv_vbrev8 (vuint16m1_t vs2, size_t vl); +vuint16m2_t __riscv_vbrev8 (vuint16m2_t vs2, size_t vl); +vuint16m4_t __riscv_vbrev8 (vuint16m4_t vs2, size_t vl); +vuint16m8_t __riscv_vbrev8 (vuint16m8_t vs2, size_t vl); +vuint32mf2_t __riscv_vbrev8 (vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vbrev8 (vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vbrev8 (vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vbrev8 (vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vbrev8 (vuint32m8_t vs2, size_t vl); +vuint64m1_t __riscv_vbrev8 (vuint64m1_t vs2, size_t vl); +vuint64m2_t __riscv_vbrev8 (vuint64m2_t vs2, size_t vl); +vuint64m4_t __riscv_vbrev8 (vuint64m4_t vs2, size_t 
vl); +vuint64m8_t __riscv_vbrev8 (vuint64m8_t vs2, size_t vl); +vuint8mf8_t __riscv_vrev8 (vuint8mf8_t vs2, size_t vl); +vuint8mf4_t __riscv_vrev8 (vuint8mf4_t vs2, size_t vl); +vuint8mf2_t __riscv_vrev8 (vuint8mf2_t vs2, size_t vl); +vuint8m1_t __riscv_vrev8 (vuint8m1_t vs2, size_t vl); +vuint8m2_t __riscv_vrev8 (vuint8m2_t vs2, size_t vl); +vuint8m4_t __riscv_vrev8 (vuint8m4_t vs2, size_t vl); +vuint8m8_t __riscv_vrev8 (vuint8m8_t vs2, size_t vl); +vuint16mf4_t __riscv_vrev8 (vuint16mf4_t vs2, size_t vl); +vuint16mf2_t __riscv_vrev8 (vuint16mf2_t vs2, size_t vl); +vuint16m1_t __riscv_vrev8 (vuint16m1_t vs2, size_t vl); +vuint16m2_t __riscv_vrev8 (vuint16m2_t vs2, size_t vl); +vuint16m4_t __riscv_vrev8 (vuint16m4_t vs2, size_t vl); +vuint16m8_t __riscv_vrev8 (vuint16m8_t vs2, size_t vl); +vuint32mf2_t __riscv_vrev8 (vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vrev8 (vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vrev8 (vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vrev8 (vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vrev8 (vuint32m8_t vs2, size_t vl); +vuint64m1_t __riscv_vrev8 (vuint64m1_t vs2, size_t vl); +vuint64m2_t __riscv_vrev8 (vuint64m2_t vs2, size_t vl); +vuint64m4_t __riscv_vrev8 (vuint64m4_t vs2, size_t vl); +vuint64m8_t __riscv_vrev8 (vuint64m8_t vs2, size_t vl); +// masked functions +vuint8mf8_t __riscv_vbrev (vbool64_t vm, vuint8mf8_t vs2, size_t vl); +vuint8mf4_t __riscv_vbrev (vbool32_t vm, vuint8mf4_t vs2, size_t vl); +vuint8mf2_t __riscv_vbrev (vbool16_t vm, vuint8mf2_t vs2, size_t vl); +vuint8m1_t __riscv_vbrev (vbool8_t vm, vuint8m1_t vs2, size_t vl); +vuint8m2_t __riscv_vbrev (vbool4_t vm, vuint8m2_t vs2, size_t vl); +vuint8m4_t __riscv_vbrev (vbool2_t vm, vuint8m4_t vs2, size_t vl); +vuint8m8_t __riscv_vbrev (vbool1_t vm, vuint8m8_t vs2, size_t vl); +vuint16mf4_t __riscv_vbrev (vbool64_t vm, vuint16mf4_t vs2, size_t vl); +vuint16mf2_t __riscv_vbrev (vbool32_t vm, vuint16mf2_t vs2, size_t vl); +vuint16m1_t __riscv_vbrev (vbool16_t vm, vuint16m1_t vs2, size_t vl); +vuint16m2_t __riscv_vbrev (vbool8_t vm, vuint16m2_t vs2, size_t vl); +vuint16m4_t __riscv_vbrev (vbool4_t vm, vuint16m4_t vs2, size_t vl); +vuint16m8_t __riscv_vbrev (vbool2_t vm, vuint16m8_t vs2, size_t vl); +vuint32mf2_t __riscv_vbrev (vbool64_t vm, vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vbrev (vbool32_t vm, vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vbrev (vbool16_t vm, vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vbrev (vbool8_t vm, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vbrev (vbool4_t vm, vuint32m8_t vs2, size_t vl); +vuint64m1_t __riscv_vbrev (vbool64_t vm, vuint64m1_t vs2, size_t vl); +vuint64m2_t __riscv_vbrev (vbool32_t vm, vuint64m2_t vs2, size_t vl); +vuint64m4_t __riscv_vbrev (vbool16_t vm, vuint64m4_t vs2, size_t vl); +vuint64m8_t __riscv_vbrev (vbool8_t vm, vuint64m8_t vs2, size_t vl); +vuint8mf8_t __riscv_vbrev8 (vbool64_t vm, vuint8mf8_t vs2, size_t vl); +vuint8mf4_t __riscv_vbrev8 (vbool32_t vm, vuint8mf4_t vs2, size_t vl); +vuint8mf2_t __riscv_vbrev8 (vbool16_t vm, vuint8mf2_t vs2, size_t vl); +vuint8m1_t __riscv_vbrev8 (vbool8_t vm, vuint8m1_t vs2, size_t vl); +vuint8m2_t __riscv_vbrev8 (vbool4_t vm, vuint8m2_t vs2, size_t vl); +vuint8m4_t __riscv_vbrev8 (vbool2_t vm, vuint8m4_t vs2, size_t vl); +vuint8m8_t __riscv_vbrev8 (vbool1_t vm, vuint8m8_t vs2, size_t vl); +vuint16mf4_t __riscv_vbrev8 (vbool64_t vm, vuint16mf4_t vs2, size_t vl); +vuint16mf2_t __riscv_vbrev8 (vbool32_t vm, vuint16mf2_t vs2, size_t vl); +vuint16m1_t __riscv_vbrev8 
(vbool16_t vm, vuint16m1_t vs2, size_t vl); +vuint16m2_t __riscv_vbrev8 (vbool8_t vm, vuint16m2_t vs2, size_t vl); +vuint16m4_t __riscv_vbrev8 (vbool4_t vm, vuint16m4_t vs2, size_t vl); +vuint16m8_t __riscv_vbrev8 (vbool2_t vm, vuint16m8_t vs2, size_t vl); +vuint32mf2_t __riscv_vbrev8 (vbool64_t vm, vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vbrev8 (vbool32_t vm, vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vbrev8 (vbool16_t vm, vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vbrev8 (vbool8_t vm, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vbrev8 (vbool4_t vm, vuint32m8_t vs2, size_t vl); +vuint64m1_t __riscv_vbrev8 (vbool64_t vm, vuint64m1_t vs2, size_t vl); +vuint64m2_t __riscv_vbrev8 (vbool32_t vm, vuint64m2_t vs2, size_t vl); +vuint64m4_t __riscv_vbrev8 (vbool16_t vm, vuint64m4_t vs2, size_t vl); +vuint64m8_t __riscv_vbrev8 (vbool8_t vm, vuint64m8_t vs2, size_t vl); +vuint8mf8_t __riscv_vrev8 (vbool64_t vm, vuint8mf8_t vs2, size_t vl); +vuint8mf4_t __riscv_vrev8 (vbool32_t vm, vuint8mf4_t vs2, size_t vl); +vuint8mf2_t __riscv_vrev8 (vbool16_t vm, vuint8mf2_t vs2, size_t vl); +vuint8m1_t __riscv_vrev8 (vbool8_t vm, vuint8m1_t vs2, size_t vl); +vuint8m2_t __riscv_vrev8 (vbool4_t vm, vuint8m2_t vs2, size_t vl); +vuint8m4_t __riscv_vrev8 (vbool2_t vm, vuint8m4_t vs2, size_t vl); +vuint8m8_t __riscv_vrev8 (vbool1_t vm, vuint8m8_t vs2, size_t vl); +vuint16mf4_t __riscv_vrev8 (vbool64_t vm, vuint16mf4_t vs2, size_t vl); +vuint16mf2_t __riscv_vrev8 (vbool32_t vm, vuint16mf2_t vs2, size_t vl); +vuint16m1_t __riscv_vrev8 (vbool16_t vm, vuint16m1_t vs2, size_t vl); +vuint16m2_t __riscv_vrev8 (vbool8_t vm, vuint16m2_t vs2, size_t vl); +vuint16m4_t __riscv_vrev8 (vbool4_t vm, vuint16m4_t vs2, size_t vl); +vuint16m8_t __riscv_vrev8 (vbool2_t vm, vuint16m8_t vs2, size_t vl); +vuint32mf2_t __riscv_vrev8 (vbool64_t vm, vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vrev8 (vbool32_t vm, vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vrev8 (vbool16_t vm, vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vrev8 (vbool8_t vm, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vrev8 (vbool4_t vm, vuint32m8_t vs2, size_t vl); +vuint64m1_t __riscv_vrev8 (vbool64_t vm, vuint64m1_t vs2, size_t vl); +vuint64m2_t __riscv_vrev8 (vbool32_t vm, vuint64m2_t vs2, size_t vl); +vuint64m4_t __riscv_vrev8 (vbool16_t vm, vuint64m4_t vs2, size_t vl); +vuint64m8_t __riscv_vrev8 (vbool8_t vm, vuint64m8_t vs2, size_t vl); +---- + +[[overloaded-]] +==== Vector Basic Bit-manipulation - Count Bits + +[,c] +---- +vuint8mf8_t __riscv_vclz (vuint8mf8_t vs2, size_t vl); +vuint8mf4_t __riscv_vclz (vuint8mf4_t vs2, size_t vl); +vuint8mf2_t __riscv_vclz (vuint8mf2_t vs2, size_t vl); +vuint8m1_t __riscv_vclz (vuint8m1_t vs2, size_t vl); +vuint8m2_t __riscv_vclz (vuint8m2_t vs2, size_t vl); +vuint8m4_t __riscv_vclz (vuint8m4_t vs2, size_t vl); +vuint8m8_t __riscv_vclz (vuint8m8_t vs2, size_t vl); +vuint16mf4_t __riscv_vclz (vuint16mf4_t vs2, size_t vl); +vuint16mf2_t __riscv_vclz (vuint16mf2_t vs2, size_t vl); +vuint16m1_t __riscv_vclz (vuint16m1_t vs2, size_t vl); +vuint16m2_t __riscv_vclz (vuint16m2_t vs2, size_t vl); +vuint16m4_t __riscv_vclz (vuint16m4_t vs2, size_t vl); +vuint16m8_t __riscv_vclz (vuint16m8_t vs2, size_t vl); +vuint32mf2_t __riscv_vclz (vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vclz (vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vclz (vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vclz (vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vclz (vuint32m8_t vs2, size_t vl); 
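+
+// Hypothetical usage sketch (editor's illustration, not part of the generated
+// listing): per-element count-leading-zeros over a uint32_t buffer using the
+// overloaded __riscv_vclz declared above. Assumes a toolchain providing
+// <riscv_vector.h> with Zvbb enabled; the helper name clz_u32_buf is made up.
+#include <riscv_vector.h>
+#include <stdint.h>
+void clz_u32_buf(uint32_t *dst, const uint32_t *src, size_t n) {
+  while (n > 0) {
+    size_t vl = __riscv_vsetvl_e32m8(n);                  // strip-mine the buffer
+    vuint32m8_t v = __riscv_vle32_v_u32m8(src, vl);
+    __riscv_vse32_v_u32m8(dst, __riscv_vclz(v, vl), vl);  // zero inputs yield 32
+    src += vl;
+    dst += vl;
+    n -= vl;
+  }
+}
+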
+vuint64m1_t __riscv_vclz (vuint64m1_t vs2, size_t vl); +vuint64m2_t __riscv_vclz (vuint64m2_t vs2, size_t vl); +vuint64m4_t __riscv_vclz (vuint64m4_t vs2, size_t vl); +vuint64m8_t __riscv_vclz (vuint64m8_t vs2, size_t vl); +vuint8mf8_t __riscv_vctz (vuint8mf8_t vs2, size_t vl); +vuint8mf4_t __riscv_vctz (vuint8mf4_t vs2, size_t vl); +vuint8mf2_t __riscv_vctz (vuint8mf2_t vs2, size_t vl); +vuint8m1_t __riscv_vctz (vuint8m1_t vs2, size_t vl); +vuint8m2_t __riscv_vctz (vuint8m2_t vs2, size_t vl); +vuint8m4_t __riscv_vctz (vuint8m4_t vs2, size_t vl); +vuint8m8_t __riscv_vctz (vuint8m8_t vs2, size_t vl); +vuint16mf4_t __riscv_vctz (vuint16mf4_t vs2, size_t vl); +vuint16mf2_t __riscv_vctz (vuint16mf2_t vs2, size_t vl); +vuint16m1_t __riscv_vctz (vuint16m1_t vs2, size_t vl); +vuint16m2_t __riscv_vctz (vuint16m2_t vs2, size_t vl); +vuint16m4_t __riscv_vctz (vuint16m4_t vs2, size_t vl); +vuint16m8_t __riscv_vctz (vuint16m8_t vs2, size_t vl); +vuint32mf2_t __riscv_vctz (vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vctz (vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vctz (vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vctz (vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vctz (vuint32m8_t vs2, size_t vl); +vuint64m1_t __riscv_vctz (vuint64m1_t vs2, size_t vl); +vuint64m2_t __riscv_vctz (vuint64m2_t vs2, size_t vl); +vuint64m4_t __riscv_vctz (vuint64m4_t vs2, size_t vl); +vuint64m8_t __riscv_vctz (vuint64m8_t vs2, size_t vl); +// masked functions +vuint8mf8_t __riscv_vclz (vbool64_t vm, vuint8mf8_t vs2, size_t vl); +vuint8mf4_t __riscv_vclz (vbool32_t vm, vuint8mf4_t vs2, size_t vl); +vuint8mf2_t __riscv_vclz (vbool16_t vm, vuint8mf2_t vs2, size_t vl); +vuint8m1_t __riscv_vclz (vbool8_t vm, vuint8m1_t vs2, size_t vl); +vuint8m2_t __riscv_vclz (vbool4_t vm, vuint8m2_t vs2, size_t vl); +vuint8m4_t __riscv_vclz (vbool2_t vm, vuint8m4_t vs2, size_t vl); +vuint8m8_t __riscv_vclz (vbool1_t vm, vuint8m8_t vs2, size_t vl); +vuint16mf4_t __riscv_vclz (vbool64_t vm, vuint16mf4_t vs2, size_t vl); +vuint16mf2_t __riscv_vclz (vbool32_t vm, vuint16mf2_t vs2, size_t vl); +vuint16m1_t __riscv_vclz (vbool16_t vm, vuint16m1_t vs2, size_t vl); +vuint16m2_t __riscv_vclz (vbool8_t vm, vuint16m2_t vs2, size_t vl); +vuint16m4_t __riscv_vclz (vbool4_t vm, vuint16m4_t vs2, size_t vl); +vuint16m8_t __riscv_vclz (vbool2_t vm, vuint16m8_t vs2, size_t vl); +vuint32mf2_t __riscv_vclz (vbool64_t vm, vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vclz (vbool32_t vm, vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vclz (vbool16_t vm, vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vclz (vbool8_t vm, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vclz (vbool4_t vm, vuint32m8_t vs2, size_t vl); +vuint64m1_t __riscv_vclz (vbool64_t vm, vuint64m1_t vs2, size_t vl); +vuint64m2_t __riscv_vclz (vbool32_t vm, vuint64m2_t vs2, size_t vl); +vuint64m4_t __riscv_vclz (vbool16_t vm, vuint64m4_t vs2, size_t vl); +vuint64m8_t __riscv_vclz (vbool8_t vm, vuint64m8_t vs2, size_t vl); +vuint8mf8_t __riscv_vctz (vbool64_t vm, vuint8mf8_t vs2, size_t vl); +vuint8mf4_t __riscv_vctz (vbool32_t vm, vuint8mf4_t vs2, size_t vl); +vuint8mf2_t __riscv_vctz (vbool16_t vm, vuint8mf2_t vs2, size_t vl); +vuint8m1_t __riscv_vctz (vbool8_t vm, vuint8m1_t vs2, size_t vl); +vuint8m2_t __riscv_vctz (vbool4_t vm, vuint8m2_t vs2, size_t vl); +vuint8m4_t __riscv_vctz (vbool2_t vm, vuint8m4_t vs2, size_t vl); +vuint8m8_t __riscv_vctz (vbool1_t vm, vuint8m8_t vs2, size_t vl); +vuint16mf4_t __riscv_vctz (vbool64_t vm, vuint16mf4_t vs2, size_t vl); 
+vuint16mf2_t __riscv_vctz (vbool32_t vm, vuint16mf2_t vs2, size_t vl); +vuint16m1_t __riscv_vctz (vbool16_t vm, vuint16m1_t vs2, size_t vl); +vuint16m2_t __riscv_vctz (vbool8_t vm, vuint16m2_t vs2, size_t vl); +vuint16m4_t __riscv_vctz (vbool4_t vm, vuint16m4_t vs2, size_t vl); +vuint16m8_t __riscv_vctz (vbool2_t vm, vuint16m8_t vs2, size_t vl); +vuint32mf2_t __riscv_vctz (vbool64_t vm, vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vctz (vbool32_t vm, vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vctz (vbool16_t vm, vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vctz (vbool8_t vm, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vctz (vbool4_t vm, vuint32m8_t vs2, size_t vl); +vuint64m1_t __riscv_vctz (vbool64_t vm, vuint64m1_t vs2, size_t vl); +vuint64m2_t __riscv_vctz (vbool32_t vm, vuint64m2_t vs2, size_t vl); +vuint64m4_t __riscv_vctz (vbool16_t vm, vuint64m4_t vs2, size_t vl); +vuint64m8_t __riscv_vctz (vbool8_t vm, vuint64m8_t vs2, size_t vl); +---- + +[[overloaded-]] +==== Vector Basic Bit-manipulation - Vector Population Count + +[,c] +---- +vuint8mf8_t __riscv_vcpop (vuint8mf8_t vs2, size_t vl); +vuint8mf4_t __riscv_vcpop (vuint8mf4_t vs2, size_t vl); +vuint8mf2_t __riscv_vcpop (vuint8mf2_t vs2, size_t vl); +vuint8m1_t __riscv_vcpop (vuint8m1_t vs2, size_t vl); +vuint8m2_t __riscv_vcpop (vuint8m2_t vs2, size_t vl); +vuint8m4_t __riscv_vcpop (vuint8m4_t vs2, size_t vl); +vuint8m8_t __riscv_vcpop (vuint8m8_t vs2, size_t vl); +vuint16mf4_t __riscv_vcpop (vuint16mf4_t vs2, size_t vl); +vuint16mf2_t __riscv_vcpop (vuint16mf2_t vs2, size_t vl); +vuint16m1_t __riscv_vcpop (vuint16m1_t vs2, size_t vl); +vuint16m2_t __riscv_vcpop (vuint16m2_t vs2, size_t vl); +vuint16m4_t __riscv_vcpop (vuint16m4_t vs2, size_t vl); +vuint16m8_t __riscv_vcpop (vuint16m8_t vs2, size_t vl); +vuint32mf2_t __riscv_vcpop (vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vcpop (vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vcpop (vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vcpop (vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vcpop (vuint32m8_t vs2, size_t vl); +vuint64m1_t __riscv_vcpop (vuint64m1_t vs2, size_t vl); +vuint64m2_t __riscv_vcpop (vuint64m2_t vs2, size_t vl); +vuint64m4_t __riscv_vcpop (vuint64m4_t vs2, size_t vl); +vuint64m8_t __riscv_vcpop (vuint64m8_t vs2, size_t vl); +// masked functions +vuint8mf8_t __riscv_vcpop (vbool64_t vm, vuint8mf8_t vs2, size_t vl); +vuint8mf4_t __riscv_vcpop (vbool32_t vm, vuint8mf4_t vs2, size_t vl); +vuint8mf2_t __riscv_vcpop (vbool16_t vm, vuint8mf2_t vs2, size_t vl); +vuint8m1_t __riscv_vcpop (vbool8_t vm, vuint8m1_t vs2, size_t vl); +vuint8m2_t __riscv_vcpop (vbool4_t vm, vuint8m2_t vs2, size_t vl); +vuint8m4_t __riscv_vcpop (vbool2_t vm, vuint8m4_t vs2, size_t vl); +vuint8m8_t __riscv_vcpop (vbool1_t vm, vuint8m8_t vs2, size_t vl); +vuint16mf4_t __riscv_vcpop (vbool64_t vm, vuint16mf4_t vs2, size_t vl); +vuint16mf2_t __riscv_vcpop (vbool32_t vm, vuint16mf2_t vs2, size_t vl); +vuint16m1_t __riscv_vcpop (vbool16_t vm, vuint16m1_t vs2, size_t vl); +vuint16m2_t __riscv_vcpop (vbool8_t vm, vuint16m2_t vs2, size_t vl); +vuint16m4_t __riscv_vcpop (vbool4_t vm, vuint16m4_t vs2, size_t vl); +vuint16m8_t __riscv_vcpop (vbool2_t vm, vuint16m8_t vs2, size_t vl); +vuint32mf2_t __riscv_vcpop (vbool64_t vm, vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vcpop (vbool32_t vm, vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vcpop (vbool16_t vm, vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vcpop (vbool8_t vm, vuint32m4_t vs2, size_t vl); 
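+
+// Hypothetical usage sketch (editor's illustration, not part of the generated
+// listing): element-wise population count of a byte buffer with the overloaded
+// __riscv_vcpop above. Note this is the per-element vcpop.v from Zvbb, not the
+// mask-register popcount; the helper name popcount_u8_buf is made up.
+#include <riscv_vector.h>
+#include <stdint.h>
+void popcount_u8_buf(uint8_t *dst, const uint8_t *src, size_t n) {
+  while (n > 0) {
+    size_t vl = __riscv_vsetvl_e8m4(n);
+    vuint8m4_t v = __riscv_vle8_v_u8m4(src, vl);
+    __riscv_vse8_v_u8m4(dst, __riscv_vcpop(v, vl), vl);   // bits set per element
+    src += vl;
+    dst += vl;
+    n -= vl;
+  }
+}
+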
+vuint32m8_t __riscv_vcpop (vbool4_t vm, vuint32m8_t vs2, size_t vl); +vuint64m1_t __riscv_vcpop (vbool64_t vm, vuint64m1_t vs2, size_t vl); +vuint64m2_t __riscv_vcpop (vbool32_t vm, vuint64m2_t vs2, size_t vl); +vuint64m4_t __riscv_vcpop (vbool16_t vm, vuint64m4_t vs2, size_t vl); +vuint64m8_t __riscv_vcpop (vbool8_t vm, vuint64m8_t vs2, size_t vl); +---- + +[[overloaded-]] +==== Vector Bit-manipulation used in Cryptography - Rotate + +[,c] +---- +vuint8mf8_t __riscv_vrol (vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl); +vuint8mf8_t __riscv_vrol (vuint8mf8_t vs2, size_t rs1, size_t vl); +vuint8mf4_t __riscv_vrol (vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl); +vuint8mf4_t __riscv_vrol (vuint8mf4_t vs2, size_t rs1, size_t vl); +vuint8mf2_t __riscv_vrol (vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl); +vuint8mf2_t __riscv_vrol (vuint8mf2_t vs2, size_t rs1, size_t vl); +vuint8m1_t __riscv_vrol (vuint8m1_t vs2, vuint8m1_t vs1, size_t vl); +vuint8m1_t __riscv_vrol (vuint8m1_t vs2, size_t rs1, size_t vl); +vuint8m2_t __riscv_vrol (vuint8m2_t vs2, vuint8m2_t vs1, size_t vl); +vuint8m2_t __riscv_vrol (vuint8m2_t vs2, size_t rs1, size_t vl); +vuint8m4_t __riscv_vrol (vuint8m4_t vs2, vuint8m4_t vs1, size_t vl); +vuint8m4_t __riscv_vrol (vuint8m4_t vs2, size_t rs1, size_t vl); +vuint8m8_t __riscv_vrol (vuint8m8_t vs2, vuint8m8_t vs1, size_t vl); +vuint8m8_t __riscv_vrol (vuint8m8_t vs2, size_t rs1, size_t vl); +vuint16mf4_t __riscv_vrol (vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl); +vuint16mf4_t __riscv_vrol (vuint16mf4_t vs2, size_t rs1, size_t vl); +vuint16mf2_t __riscv_vrol (vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl); +vuint16mf2_t __riscv_vrol (vuint16mf2_t vs2, size_t rs1, size_t vl); +vuint16m1_t __riscv_vrol (vuint16m1_t vs2, vuint16m1_t vs1, size_t vl); +vuint16m1_t __riscv_vrol (vuint16m1_t vs2, size_t rs1, size_t vl); +vuint16m2_t __riscv_vrol (vuint16m2_t vs2, vuint16m2_t vs1, size_t vl); +vuint16m2_t __riscv_vrol (vuint16m2_t vs2, size_t rs1, size_t vl); +vuint16m4_t __riscv_vrol (vuint16m4_t vs2, vuint16m4_t vs1, size_t vl); +vuint16m4_t __riscv_vrol (vuint16m4_t vs2, size_t rs1, size_t vl); +vuint16m8_t __riscv_vrol (vuint16m8_t vs2, vuint16m8_t vs1, size_t vl); +vuint16m8_t __riscv_vrol (vuint16m8_t vs2, size_t rs1, size_t vl); +vuint32mf2_t __riscv_vrol (vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl); +vuint32mf2_t __riscv_vrol (vuint32mf2_t vs2, size_t rs1, size_t vl); +vuint32m1_t __riscv_vrol (vuint32m1_t vs2, vuint32m1_t vs1, size_t vl); +vuint32m1_t __riscv_vrol (vuint32m1_t vs2, size_t rs1, size_t vl); +vuint32m2_t __riscv_vrol (vuint32m2_t vs2, vuint32m2_t vs1, size_t vl); +vuint32m2_t __riscv_vrol (vuint32m2_t vs2, size_t rs1, size_t vl); +vuint32m4_t __riscv_vrol (vuint32m4_t vs2, vuint32m4_t vs1, size_t vl); +vuint32m4_t __riscv_vrol (vuint32m4_t vs2, size_t rs1, size_t vl); +vuint32m8_t __riscv_vrol (vuint32m8_t vs2, vuint32m8_t vs1, size_t vl); +vuint32m8_t __riscv_vrol (vuint32m8_t vs2, size_t rs1, size_t vl); +vuint64m1_t __riscv_vrol (vuint64m1_t vs2, vuint64m1_t vs1, size_t vl); +vuint64m1_t __riscv_vrol (vuint64m1_t vs2, size_t rs1, size_t vl); +vuint64m2_t __riscv_vrol (vuint64m2_t vs2, vuint64m2_t vs1, size_t vl); +vuint64m2_t __riscv_vrol (vuint64m2_t vs2, size_t rs1, size_t vl); +vuint64m4_t __riscv_vrol (vuint64m4_t vs2, vuint64m4_t vs1, size_t vl); +vuint64m4_t __riscv_vrol (vuint64m4_t vs2, size_t rs1, size_t vl); +vuint64m8_t __riscv_vrol (vuint64m8_t vs2, vuint64m8_t vs1, size_t vl); +vuint64m8_t __riscv_vrol (vuint64m8_t vs2, size_t rs1, size_t vl); 
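+
+// Hypothetical usage sketch (editor's illustration, not part of the generated
+// listing): rotate every 32-bit element left by a runtime amount using the
+// vector-scalar form of the overloaded __riscv_vrol above (the rotate amount
+// is taken modulo SEW). The helper name rotl32_buf is made up.
+#include <riscv_vector.h>
+#include <stdint.h>
+void rotl32_buf(uint32_t *dst, const uint32_t *src, size_t n, size_t amount) {
+  while (n > 0) {
+    size_t vl = __riscv_vsetvl_e32m4(n);
+    vuint32m4_t v = __riscv_vle32_v_u32m4(src, vl);
+    __riscv_vse32_v_u32m4(dst, __riscv_vrol(v, amount, vl), vl);
+    src += vl;
+    dst += vl;
+    n -= vl;
+  }
+}
+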
+vuint8mf8_t __riscv_vror (vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl); +vuint8mf8_t __riscv_vror (vuint8mf8_t vs2, size_t rs1, size_t vl); +vuint8mf4_t __riscv_vror (vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl); +vuint8mf4_t __riscv_vror (vuint8mf4_t vs2, size_t rs1, size_t vl); +vuint8mf2_t __riscv_vror (vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl); +vuint8mf2_t __riscv_vror (vuint8mf2_t vs2, size_t rs1, size_t vl); +vuint8m1_t __riscv_vror (vuint8m1_t vs2, vuint8m1_t vs1, size_t vl); +vuint8m1_t __riscv_vror (vuint8m1_t vs2, size_t rs1, size_t vl); +vuint8m2_t __riscv_vror (vuint8m2_t vs2, vuint8m2_t vs1, size_t vl); +vuint8m2_t __riscv_vror (vuint8m2_t vs2, size_t rs1, size_t vl); +vuint8m4_t __riscv_vror (vuint8m4_t vs2, vuint8m4_t vs1, size_t vl); +vuint8m4_t __riscv_vror (vuint8m4_t vs2, size_t rs1, size_t vl); +vuint8m8_t __riscv_vror (vuint8m8_t vs2, vuint8m8_t vs1, size_t vl); +vuint8m8_t __riscv_vror (vuint8m8_t vs2, size_t rs1, size_t vl); +vuint16mf4_t __riscv_vror (vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl); +vuint16mf4_t __riscv_vror (vuint16mf4_t vs2, size_t rs1, size_t vl); +vuint16mf2_t __riscv_vror (vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl); +vuint16mf2_t __riscv_vror (vuint16mf2_t vs2, size_t rs1, size_t vl); +vuint16m1_t __riscv_vror (vuint16m1_t vs2, vuint16m1_t vs1, size_t vl); +vuint16m1_t __riscv_vror (vuint16m1_t vs2, size_t rs1, size_t vl); +vuint16m2_t __riscv_vror (vuint16m2_t vs2, vuint16m2_t vs1, size_t vl); +vuint16m2_t __riscv_vror (vuint16m2_t vs2, size_t rs1, size_t vl); +vuint16m4_t __riscv_vror (vuint16m4_t vs2, vuint16m4_t vs1, size_t vl); +vuint16m4_t __riscv_vror (vuint16m4_t vs2, size_t rs1, size_t vl); +vuint16m8_t __riscv_vror (vuint16m8_t vs2, vuint16m8_t vs1, size_t vl); +vuint16m8_t __riscv_vror (vuint16m8_t vs2, size_t rs1, size_t vl); +vuint32mf2_t __riscv_vror (vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl); +vuint32mf2_t __riscv_vror (vuint32mf2_t vs2, size_t rs1, size_t vl); +vuint32m1_t __riscv_vror (vuint32m1_t vs2, vuint32m1_t vs1, size_t vl); +vuint32m1_t __riscv_vror (vuint32m1_t vs2, size_t rs1, size_t vl); +vuint32m2_t __riscv_vror (vuint32m2_t vs2, vuint32m2_t vs1, size_t vl); +vuint32m2_t __riscv_vror (vuint32m2_t vs2, size_t rs1, size_t vl); +vuint32m4_t __riscv_vror (vuint32m4_t vs2, vuint32m4_t vs1, size_t vl); +vuint32m4_t __riscv_vror (vuint32m4_t vs2, size_t rs1, size_t vl); +vuint32m8_t __riscv_vror (vuint32m8_t vs2, vuint32m8_t vs1, size_t vl); +vuint32m8_t __riscv_vror (vuint32m8_t vs2, size_t rs1, size_t vl); +vuint64m1_t __riscv_vror (vuint64m1_t vs2, vuint64m1_t vs1, size_t vl); +vuint64m1_t __riscv_vror (vuint64m1_t vs2, size_t rs1, size_t vl); +vuint64m2_t __riscv_vror (vuint64m2_t vs2, vuint64m2_t vs1, size_t vl); +vuint64m2_t __riscv_vror (vuint64m2_t vs2, size_t rs1, size_t vl); +vuint64m4_t __riscv_vror (vuint64m4_t vs2, vuint64m4_t vs1, size_t vl); +vuint64m4_t __riscv_vror (vuint64m4_t vs2, size_t rs1, size_t vl); +vuint64m8_t __riscv_vror (vuint64m8_t vs2, vuint64m8_t vs1, size_t vl); +vuint64m8_t __riscv_vror (vuint64m8_t vs2, size_t rs1, size_t vl); +// masked functions +vuint8mf8_t __riscv_vrol (vbool64_t vm, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl); +vuint8mf8_t __riscv_vrol (vbool64_t vm, vuint8mf8_t vs2, size_t rs1, size_t vl); +vuint8mf4_t __riscv_vrol (vbool32_t vm, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl); +vuint8mf4_t __riscv_vrol (vbool32_t vm, vuint8mf4_t vs2, size_t rs1, size_t vl); +vuint8mf2_t __riscv_vrol (vbool16_t vm, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl); 
+vuint8mf2_t __riscv_vrol (vbool16_t vm, vuint8mf2_t vs2, size_t rs1, size_t vl); +vuint8m1_t __riscv_vrol (vbool8_t vm, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl); +vuint8m1_t __riscv_vrol (vbool8_t vm, vuint8m1_t vs2, size_t rs1, size_t vl); +vuint8m2_t __riscv_vrol (vbool4_t vm, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl); +vuint8m2_t __riscv_vrol (vbool4_t vm, vuint8m2_t vs2, size_t rs1, size_t vl); +vuint8m4_t __riscv_vrol (vbool2_t vm, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl); +vuint8m4_t __riscv_vrol (vbool2_t vm, vuint8m4_t vs2, size_t rs1, size_t vl); +vuint8m8_t __riscv_vrol (vbool1_t vm, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl); +vuint8m8_t __riscv_vrol (vbool1_t vm, vuint8m8_t vs2, size_t rs1, size_t vl); +vuint16mf4_t __riscv_vrol (vbool64_t vm, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl); +vuint16mf4_t __riscv_vrol (vbool64_t vm, vuint16mf4_t vs2, size_t rs1, size_t vl); +vuint16mf2_t __riscv_vrol (vbool32_t vm, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl); +vuint16mf2_t __riscv_vrol (vbool32_t vm, vuint16mf2_t vs2, size_t rs1, size_t vl); +vuint16m1_t __riscv_vrol (vbool16_t vm, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl); +vuint16m1_t __riscv_vrol (vbool16_t vm, vuint16m1_t vs2, size_t rs1, size_t vl); +vuint16m2_t __riscv_vrol (vbool8_t vm, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl); +vuint16m2_t __riscv_vrol (vbool8_t vm, vuint16m2_t vs2, size_t rs1, size_t vl); +vuint16m4_t __riscv_vrol (vbool4_t vm, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl); +vuint16m4_t __riscv_vrol (vbool4_t vm, vuint16m4_t vs2, size_t rs1, size_t vl); +vuint16m8_t __riscv_vrol (vbool2_t vm, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl); +vuint16m8_t __riscv_vrol (vbool2_t vm, vuint16m8_t vs2, size_t rs1, size_t vl); +vuint32mf2_t __riscv_vrol (vbool64_t vm, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl); +vuint32mf2_t __riscv_vrol (vbool64_t vm, vuint32mf2_t vs2, size_t rs1, size_t vl); +vuint32m1_t __riscv_vrol (vbool32_t vm, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl); +vuint32m1_t __riscv_vrol (vbool32_t vm, vuint32m1_t vs2, size_t rs1, size_t vl); +vuint32m2_t __riscv_vrol (vbool16_t vm, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl); +vuint32m2_t __riscv_vrol (vbool16_t vm, vuint32m2_t vs2, size_t rs1, size_t vl); +vuint32m4_t __riscv_vrol (vbool8_t vm, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl); +vuint32m4_t __riscv_vrol (vbool8_t vm, vuint32m4_t vs2, size_t rs1, size_t vl); +vuint32m8_t __riscv_vrol (vbool4_t vm, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl); +vuint32m8_t __riscv_vrol (vbool4_t vm, vuint32m8_t vs2, size_t rs1, size_t vl); +vuint64m1_t __riscv_vrol (vbool64_t vm, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl); +vuint64m1_t __riscv_vrol (vbool64_t vm, vuint64m1_t vs2, size_t rs1, size_t vl); +vuint64m2_t __riscv_vrol (vbool32_t vm, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl); +vuint64m2_t __riscv_vrol (vbool32_t vm, vuint64m2_t vs2, size_t rs1, size_t vl); +vuint64m4_t __riscv_vrol (vbool16_t vm, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl); +vuint64m4_t __riscv_vrol (vbool16_t vm, vuint64m4_t vs2, size_t rs1, size_t vl); +vuint64m8_t __riscv_vrol (vbool8_t vm, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl); +vuint64m8_t __riscv_vrol (vbool8_t vm, vuint64m8_t vs2, size_t rs1, size_t vl); +vuint8mf8_t __riscv_vror (vbool64_t vm, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl); +vuint8mf8_t __riscv_vror (vbool64_t vm, vuint8mf8_t vs2, size_t rs1, size_t vl); +vuint8mf4_t __riscv_vror (vbool32_t vm, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl); +vuint8mf4_t __riscv_vror 
(vbool32_t vm, vuint8mf4_t vs2, size_t rs1, size_t vl); +vuint8mf2_t __riscv_vror (vbool16_t vm, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl); +vuint8mf2_t __riscv_vror (vbool16_t vm, vuint8mf2_t vs2, size_t rs1, size_t vl); +vuint8m1_t __riscv_vror (vbool8_t vm, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl); +vuint8m1_t __riscv_vror (vbool8_t vm, vuint8m1_t vs2, size_t rs1, size_t vl); +vuint8m2_t __riscv_vror (vbool4_t vm, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl); +vuint8m2_t __riscv_vror (vbool4_t vm, vuint8m2_t vs2, size_t rs1, size_t vl); +vuint8m4_t __riscv_vror (vbool2_t vm, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl); +vuint8m4_t __riscv_vror (vbool2_t vm, vuint8m4_t vs2, size_t rs1, size_t vl); +vuint8m8_t __riscv_vror (vbool1_t vm, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl); +vuint8m8_t __riscv_vror (vbool1_t vm, vuint8m8_t vs2, size_t rs1, size_t vl); +vuint16mf4_t __riscv_vror (vbool64_t vm, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl); +vuint16mf4_t __riscv_vror (vbool64_t vm, vuint16mf4_t vs2, size_t rs1, size_t vl); +vuint16mf2_t __riscv_vror (vbool32_t vm, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl); +vuint16mf2_t __riscv_vror (vbool32_t vm, vuint16mf2_t vs2, size_t rs1, size_t vl); +vuint16m1_t __riscv_vror (vbool16_t vm, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl); +vuint16m1_t __riscv_vror (vbool16_t vm, vuint16m1_t vs2, size_t rs1, size_t vl); +vuint16m2_t __riscv_vror (vbool8_t vm, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl); +vuint16m2_t __riscv_vror (vbool8_t vm, vuint16m2_t vs2, size_t rs1, size_t vl); +vuint16m4_t __riscv_vror (vbool4_t vm, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl); +vuint16m4_t __riscv_vror (vbool4_t vm, vuint16m4_t vs2, size_t rs1, size_t vl); +vuint16m8_t __riscv_vror (vbool2_t vm, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl); +vuint16m8_t __riscv_vror (vbool2_t vm, vuint16m8_t vs2, size_t rs1, size_t vl); +vuint32mf2_t __riscv_vror (vbool64_t vm, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl); +vuint32mf2_t __riscv_vror (vbool64_t vm, vuint32mf2_t vs2, size_t rs1, size_t vl); +vuint32m1_t __riscv_vror (vbool32_t vm, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl); +vuint32m1_t __riscv_vror (vbool32_t vm, vuint32m1_t vs2, size_t rs1, size_t vl); +vuint32m2_t __riscv_vror (vbool16_t vm, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl); +vuint32m2_t __riscv_vror (vbool16_t vm, vuint32m2_t vs2, size_t rs1, size_t vl); +vuint32m4_t __riscv_vror (vbool8_t vm, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl); +vuint32m4_t __riscv_vror (vbool8_t vm, vuint32m4_t vs2, size_t rs1, size_t vl); +vuint32m8_t __riscv_vror (vbool4_t vm, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl); +vuint32m8_t __riscv_vror (vbool4_t vm, vuint32m8_t vs2, size_t rs1, size_t vl); +vuint64m1_t __riscv_vror (vbool64_t vm, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl); +vuint64m1_t __riscv_vror (vbool64_t vm, vuint64m1_t vs2, size_t rs1, size_t vl); +vuint64m2_t __riscv_vror (vbool32_t vm, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl); +vuint64m2_t __riscv_vror (vbool32_t vm, vuint64m2_t vs2, size_t rs1, size_t vl); +vuint64m4_t __riscv_vror (vbool16_t vm, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl); +vuint64m4_t __riscv_vror (vbool16_t vm, vuint64m4_t vs2, size_t rs1, size_t vl); +vuint64m8_t __riscv_vror (vbool8_t vm, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl); +vuint64m8_t __riscv_vror (vbool8_t vm, vuint64m8_t vs2, size_t rs1, size_t vl); +---- + +[[overloaded-]] +==== Vector Basic Bit-manipulation used - Widening Shift + +[,c] +---- +vuint16mf4_t __riscv_vwsll (vuint8mf8_t vs2, 
vuint8mf8_t vs1, size_t vl); +vuint16mf4_t __riscv_vwsll (vuint8mf8_t vs2, size_t rs1, size_t vl); +vuint16mf2_t __riscv_vwsll (vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl); +vuint16mf2_t __riscv_vwsll (vuint8mf4_t vs2, size_t rs1, size_t vl); +vuint16m1_t __riscv_vwsll (vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl); +vuint16m1_t __riscv_vwsll (vuint8mf2_t vs2, size_t rs1, size_t vl); +vuint16m2_t __riscv_vwsll (vuint8m1_t vs2, vuint8m1_t vs1, size_t vl); +vuint16m2_t __riscv_vwsll (vuint8m1_t vs2, size_t rs1, size_t vl); +vuint16m4_t __riscv_vwsll (vuint8m2_t vs2, vuint8m2_t vs1, size_t vl); +vuint16m4_t __riscv_vwsll (vuint8m2_t vs2, size_t rs1, size_t vl); +vuint16m8_t __riscv_vwsll (vuint8m4_t vs2, vuint8m4_t vs1, size_t vl); +vuint16m8_t __riscv_vwsll (vuint8m4_t vs2, size_t rs1, size_t vl); +vuint32mf2_t __riscv_vwsll (vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl); +vuint32mf2_t __riscv_vwsll (vuint16mf4_t vs2, size_t rs1, size_t vl); +vuint32m1_t __riscv_vwsll (vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl); +vuint32m1_t __riscv_vwsll (vuint16mf2_t vs2, size_t rs1, size_t vl); +vuint32m2_t __riscv_vwsll (vuint16m1_t vs2, vuint16m1_t vs1, size_t vl); +vuint32m2_t __riscv_vwsll (vuint16m1_t vs2, size_t rs1, size_t vl); +vuint32m4_t __riscv_vwsll (vuint16m2_t vs2, vuint16m2_t vs1, size_t vl); +vuint32m4_t __riscv_vwsll (vuint16m2_t vs2, size_t rs1, size_t vl); +vuint32m8_t __riscv_vwsll (vuint16m4_t vs2, vuint16m4_t vs1, size_t vl); +vuint32m8_t __riscv_vwsll (vuint16m4_t vs2, size_t rs1, size_t vl); +vuint64m1_t __riscv_vwsll (vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl); +vuint64m1_t __riscv_vwsll (vuint32mf2_t vs2, size_t rs1, size_t vl); +vuint64m2_t __riscv_vwsll (vuint32m1_t vs2, vuint32m1_t vs1, size_t vl); +vuint64m2_t __riscv_vwsll (vuint32m1_t vs2, size_t rs1, size_t vl); +vuint64m4_t __riscv_vwsll (vuint32m2_t vs2, vuint32m2_t vs1, size_t vl); +vuint64m4_t __riscv_vwsll (vuint32m2_t vs2, size_t rs1, size_t vl); +vuint64m8_t __riscv_vwsll (vuint32m4_t vs2, vuint32m4_t vs1, size_t vl); +vuint64m8_t __riscv_vwsll (vuint32m4_t vs2, size_t rs1, size_t vl); +// masked functions +vuint16mf4_t __riscv_vwsll (vbool64_t vm, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl); +vuint16mf4_t __riscv_vwsll (vbool64_t vm, vuint8mf8_t vs2, size_t rs1, size_t vl); +vuint16mf2_t __riscv_vwsll (vbool32_t vm, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl); +vuint16mf2_t __riscv_vwsll (vbool32_t vm, vuint8mf4_t vs2, size_t rs1, size_t vl); +vuint16m1_t __riscv_vwsll (vbool16_t vm, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl); +vuint16m1_t __riscv_vwsll (vbool16_t vm, vuint8mf2_t vs2, size_t rs1, size_t vl); +vuint16m2_t __riscv_vwsll (vbool8_t vm, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl); +vuint16m2_t __riscv_vwsll (vbool8_t vm, vuint8m1_t vs2, size_t rs1, size_t vl); +vuint16m4_t __riscv_vwsll (vbool4_t vm, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl); +vuint16m4_t __riscv_vwsll (vbool4_t vm, vuint8m2_t vs2, size_t rs1, size_t vl); +vuint16m8_t __riscv_vwsll (vbool2_t vm, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl); +vuint16m8_t __riscv_vwsll (vbool2_t vm, vuint8m4_t vs2, size_t rs1, size_t vl); +vuint32mf2_t __riscv_vwsll (vbool64_t vm, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl); +vuint32mf2_t __riscv_vwsll (vbool64_t vm, vuint16mf4_t vs2, size_t rs1, size_t vl); +vuint32m1_t __riscv_vwsll (vbool32_t vm, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl); +vuint32m1_t __riscv_vwsll (vbool32_t vm, vuint16mf2_t vs2, size_t rs1, size_t vl); +vuint32m2_t __riscv_vwsll (vbool16_t vm, vuint16m1_t vs2, 
vuint16m1_t vs1, size_t vl); +vuint32m2_t __riscv_vwsll (vbool16_t vm, vuint16m1_t vs2, size_t rs1, size_t vl); +vuint32m4_t __riscv_vwsll (vbool8_t vm, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl); +vuint32m4_t __riscv_vwsll (vbool8_t vm, vuint16m2_t vs2, size_t rs1, size_t vl); +vuint32m8_t __riscv_vwsll (vbool4_t vm, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl); +vuint32m8_t __riscv_vwsll (vbool4_t vm, vuint16m4_t vs2, size_t rs1, size_t vl); +vuint64m1_t __riscv_vwsll (vbool64_t vm, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl); +vuint64m1_t __riscv_vwsll (vbool64_t vm, vuint32mf2_t vs2, size_t rs1, size_t vl); +vuint64m2_t __riscv_vwsll (vbool32_t vm, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl); +vuint64m2_t __riscv_vwsll (vbool32_t vm, vuint32m1_t vs2, size_t rs1, size_t vl); +vuint64m4_t __riscv_vwsll (vbool16_t vm, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl); +vuint64m4_t __riscv_vwsll (vbool16_t vm, vuint32m2_t vs2, size_t rs1, size_t vl); +vuint64m8_t __riscv_vwsll (vbool8_t vm, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl); +vuint64m8_t __riscv_vwsll (vbool8_t vm, vuint32m4_t vs2, size_t rs1, size_t vl); +---- diff --git a/auto-generated/vector-crypto/overloaded_intrinsic_funcs/01_zvbc_-_vector_carryless_multiplication.adoc b/auto-generated/vector-crypto/overloaded_intrinsic_funcs/01_zvbc_-_vector_carryless_multiplication.adoc new file mode 100644 index 000000000..174233382 --- /dev/null +++ b/auto-generated/vector-crypto/overloaded_intrinsic_funcs/01_zvbc_-_vector_carryless_multiplication.adoc @@ -0,0 +1,42 @@ + +=== Zvbc - Vector Carryless Multiplication + +[[overloaded-]] +==== Vector Carryless Multiplication + +[,c] +---- +vuint64m1_t __riscv_vclmul (vuint64m1_t vs2, vuint64m1_t vs1, size_t vl); +vuint64m1_t __riscv_vclmul (vuint64m1_t vs2, uint64_t rs1, size_t vl); +vuint64m2_t __riscv_vclmul (vuint64m2_t vs2, vuint64m2_t vs1, size_t vl); +vuint64m2_t __riscv_vclmul (vuint64m2_t vs2, uint64_t rs1, size_t vl); +vuint64m4_t __riscv_vclmul (vuint64m4_t vs2, vuint64m4_t vs1, size_t vl); +vuint64m4_t __riscv_vclmul (vuint64m4_t vs2, uint64_t rs1, size_t vl); +vuint64m8_t __riscv_vclmul (vuint64m8_t vs2, vuint64m8_t vs1, size_t vl); +vuint64m8_t __riscv_vclmul (vuint64m8_t vs2, uint64_t rs1, size_t vl); +vuint64m1_t __riscv_vclmulh (vuint64m1_t vs2, vuint64m1_t vs1, size_t vl); +vuint64m1_t __riscv_vclmulh (vuint64m1_t vs2, uint64_t rs1, size_t vl); +vuint64m2_t __riscv_vclmulh (vuint64m2_t vs2, vuint64m2_t vs1, size_t vl); +vuint64m2_t __riscv_vclmulh (vuint64m2_t vs2, uint64_t rs1, size_t vl); +vuint64m4_t __riscv_vclmulh (vuint64m4_t vs2, vuint64m4_t vs1, size_t vl); +vuint64m4_t __riscv_vclmulh (vuint64m4_t vs2, uint64_t rs1, size_t vl); +vuint64m8_t __riscv_vclmulh (vuint64m8_t vs2, vuint64m8_t vs1, size_t vl); +vuint64m8_t __riscv_vclmulh (vuint64m8_t vs2, uint64_t rs1, size_t vl); +// masked functions +vuint64m1_t __riscv_vclmul (vbool64_t vm, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl); +vuint64m1_t __riscv_vclmul (vbool64_t vm, vuint64m1_t vs2, uint64_t rs1, size_t vl); +vuint64m2_t __riscv_vclmul (vbool32_t vm, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl); +vuint64m2_t __riscv_vclmul (vbool32_t vm, vuint64m2_t vs2, uint64_t rs1, size_t vl); +vuint64m4_t __riscv_vclmul (vbool16_t vm, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl); +vuint64m4_t __riscv_vclmul (vbool16_t vm, vuint64m4_t vs2, uint64_t rs1, size_t vl); +vuint64m8_t __riscv_vclmul (vbool8_t vm, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl); +vuint64m8_t __riscv_vclmul (vbool8_t vm, vuint64m8_t vs2, uint64_t 
rs1, size_t vl); +vuint64m1_t __riscv_vclmulh (vbool64_t vm, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl); +vuint64m1_t __riscv_vclmulh (vbool64_t vm, vuint64m1_t vs2, uint64_t rs1, size_t vl); +vuint64m2_t __riscv_vclmulh (vbool32_t vm, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl); +vuint64m2_t __riscv_vclmulh (vbool32_t vm, vuint64m2_t vs2, uint64_t rs1, size_t vl); +vuint64m4_t __riscv_vclmulh (vbool16_t vm, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl); +vuint64m4_t __riscv_vclmulh (vbool16_t vm, vuint64m4_t vs2, uint64_t rs1, size_t vl); +vuint64m8_t __riscv_vclmulh (vbool8_t vm, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl); +vuint64m8_t __riscv_vclmulh (vbool8_t vm, vuint64m8_t vs2, uint64_t rs1, size_t vl); +---- diff --git a/auto-generated/vector-crypto/overloaded_intrinsic_funcs/02_zvkg_-_vector_gcm_gmac.adoc b/auto-generated/vector-crypto/overloaded_intrinsic_funcs/02_zvkg_-_vector_gcm_gmac.adoc new file mode 100644 index 000000000..3b38c6571 --- /dev/null +++ b/auto-generated/vector-crypto/overloaded_intrinsic_funcs/02_zvkg_-_vector_gcm_gmac.adoc @@ -0,0 +1,19 @@ + +=== Zvkg - Vector GCM/GMAC + +[[overloaded-]] +==== Vector GCM/GMAC + +[,c] +---- +vuint32mf2_t __riscv_vghsh (vuint32mf2_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl); +vuint32m1_t __riscv_vghsh (vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl); +vuint32m2_t __riscv_vghsh (vuint32m2_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl); +vuint32m4_t __riscv_vghsh (vuint32m4_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl); +vuint32m8_t __riscv_vghsh (vuint32m8_t vd, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl); +vuint32mf2_t __riscv_vgmul (vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vgmul (vuint32m1_t vd, vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vgmul (vuint32m2_t vd, vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vgmul (vuint32m4_t vd, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vgmul (vuint32m8_t vd, vuint32m8_t vs2, size_t vl); +---- diff --git a/auto-generated/vector-crypto/overloaded_intrinsic_funcs/03_zvkned_-_nist_suite:_vector_aes_block_cipher.adoc b/auto-generated/vector-crypto/overloaded_intrinsic_funcs/03_zvkned_-_nist_suite:_vector_aes_block_cipher.adoc new file mode 100644 index 000000000..407f673d9 --- /dev/null +++ b/auto-generated/vector-crypto/overloaded_intrinsic_funcs/03_zvkned_-_nist_suite:_vector_aes_block_cipher.adoc @@ -0,0 +1,130 @@ + +=== Zvkned - NIST Suite: Vector AES Block Cipher + +[[overloaded-]] +==== Vector AES Encryption + +[,c] +---- +vuint32mf2_t __riscv_vaesef_vv (vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); +vuint32mf2_t __riscv_vaesef_vs (vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vaesef_vs (vuint32m1_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m2_t __riscv_vaesef_vs (vuint32m2_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m4_t __riscv_vaesef_vs (vuint32m4_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m8_t __riscv_vaesef_vs (vuint32m8_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vaesef_vv (vuint32m1_t vd, vuint32m1_t vs2, size_t vl); +vuint32m1_t __riscv_vaesef_vs (vuint32m1_t vd, vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vaesef_vs (vuint32m2_t vd, vuint32m1_t vs2, size_t vl); +vuint32m4_t __riscv_vaesef_vs (vuint32m4_t vd, vuint32m1_t vs2, size_t vl); +vuint32m8_t __riscv_vaesef_vs (vuint32m8_t vd, vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vaesef_vv (vuint32m2_t vd, vuint32m2_t vs2, size_t vl); +vuint32m2_t __riscv_vaesef_vs (vuint32m2_t vd, vuint32m2_t vs2, size_t vl); 
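+
+// Hypothetical usage sketch (editor's illustration, not part of the generated
+// listing): encrypting one 128-bit block with AES-128 using the conventional
+// Zvkned pattern - round 0 via __riscv_vaesz, nine middle rounds via
+// __riscv_vaesem_vv, final round via __riscv_vaesef_vv. Assumes VLEN >= 128 so
+// vl = 4 covers one element group (SEW=32, LMUL=1), and that rk holds the 44
+// words of a standard AES-128 key schedule (e.g. expanded with
+// __riscv_vaeskf1). The helper name aes128_encrypt_block is made up.
+#include <riscv_vector.h>
+#include <stdint.h>
+void aes128_encrypt_block(uint32_t block[4], const uint32_t rk[44]) {
+  size_t vl = 4;                                          // one element group
+  vuint32m1_t state = __riscv_vle32_v_u32m1(block, vl);
+  state = __riscv_vaesz(state, __riscv_vle32_v_u32m1(rk, vl), vl);
+  for (int r = 1; r <= 9; ++r)                            // middle rounds
+    state = __riscv_vaesem_vv(state, __riscv_vle32_v_u32m1(rk + 4 * r, vl), vl);
+  state = __riscv_vaesef_vv(state, __riscv_vle32_v_u32m1(rk + 40, vl), vl);
+  __riscv_vse32_v_u32m1(block, state, vl);
+}
+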
+vuint32m4_t __riscv_vaesef_vs (vuint32m4_t vd, vuint32m2_t vs2, size_t vl); +vuint32m8_t __riscv_vaesef_vs (vuint32m8_t vd, vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vaesef_vv (vuint32m4_t vd, vuint32m4_t vs2, size_t vl); +vuint32m4_t __riscv_vaesef_vs (vuint32m4_t vd, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vaesef_vs (vuint32m8_t vd, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vaesef_vv (vuint32m8_t vd, vuint32m8_t vs2, size_t vl); +vuint32mf2_t __riscv_vaesem_vv (vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); +vuint32mf2_t __riscv_vaesem_vs (vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vaesem_vs (vuint32m1_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m2_t __riscv_vaesem_vs (vuint32m2_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m4_t __riscv_vaesem_vs (vuint32m4_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m8_t __riscv_vaesem_vs (vuint32m8_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vaesem_vv (vuint32m1_t vd, vuint32m1_t vs2, size_t vl); +vuint32m1_t __riscv_vaesem_vs (vuint32m1_t vd, vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vaesem_vs (vuint32m2_t vd, vuint32m1_t vs2, size_t vl); +vuint32m4_t __riscv_vaesem_vs (vuint32m4_t vd, vuint32m1_t vs2, size_t vl); +vuint32m8_t __riscv_vaesem_vs (vuint32m8_t vd, vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vaesem_vv (vuint32m2_t vd, vuint32m2_t vs2, size_t vl); +vuint32m2_t __riscv_vaesem_vs (vuint32m2_t vd, vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vaesem_vs (vuint32m4_t vd, vuint32m2_t vs2, size_t vl); +vuint32m8_t __riscv_vaesem_vs (vuint32m8_t vd, vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vaesem_vv (vuint32m4_t vd, vuint32m4_t vs2, size_t vl); +vuint32m4_t __riscv_vaesem_vs (vuint32m4_t vd, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vaesem_vs (vuint32m8_t vd, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vaesem_vv (vuint32m8_t vd, vuint32m8_t vs2, size_t vl); +---- + +[[overloaded-]] +==== Vector AES Decryption + +[,c] +---- +vuint32mf2_t __riscv_vaesdf_vv (vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); +vuint32mf2_t __riscv_vaesdf_vs (vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vaesdf_vs (vuint32m1_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m2_t __riscv_vaesdf_vs (vuint32m2_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m4_t __riscv_vaesdf_vs (vuint32m4_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m8_t __riscv_vaesdf_vs (vuint32m8_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vaesdf_vv (vuint32m1_t vd, vuint32m1_t vs2, size_t vl); +vuint32m1_t __riscv_vaesdf_vs (vuint32m1_t vd, vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vaesdf_vs (vuint32m2_t vd, vuint32m1_t vs2, size_t vl); +vuint32m4_t __riscv_vaesdf_vs (vuint32m4_t vd, vuint32m1_t vs2, size_t vl); +vuint32m8_t __riscv_vaesdf_vs (vuint32m8_t vd, vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vaesdf_vv (vuint32m2_t vd, vuint32m2_t vs2, size_t vl); +vuint32m2_t __riscv_vaesdf_vs (vuint32m2_t vd, vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vaesdf_vs (vuint32m4_t vd, vuint32m2_t vs2, size_t vl); +vuint32m8_t __riscv_vaesdf_vs (vuint32m8_t vd, vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vaesdf_vv (vuint32m4_t vd, vuint32m4_t vs2, size_t vl); +vuint32m4_t __riscv_vaesdf_vs (vuint32m4_t vd, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vaesdf_vs (vuint32m8_t vd, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vaesdf_vv (vuint32m8_t vd, vuint32m8_t vs2, size_t vl); +vuint32mf2_t __riscv_vaesdm_vv (vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); 
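+
+// Hypothetical usage sketch (editor's illustration, not part of the generated
+// listing): one conventional AES-128 decryption pattern walks the same key
+// schedule in reverse - AddRoundKey with the last round key via __riscv_vaesz,
+// nine middle rounds via __riscv_vaesdm_vv, then the final round via
+// __riscv_vaesdf_vv. Same assumptions as the encryption sketch above
+// (VLEN >= 128, rk holds the 44 forward-expanded key words); the helper name
+// aes128_decrypt_block is made up.
+#include <riscv_vector.h>
+#include <stdint.h>
+void aes128_decrypt_block(uint32_t block[4], const uint32_t rk[44]) {
+  size_t vl = 4;                                          // one element group
+  vuint32m1_t state = __riscv_vle32_v_u32m1(block, vl);
+  state = __riscv_vaesz(state, __riscv_vle32_v_u32m1(rk + 40, vl), vl);
+  for (int r = 9; r >= 1; --r)                            // middle rounds
+    state = __riscv_vaesdm_vv(state, __riscv_vle32_v_u32m1(rk + 4 * r, vl), vl);
+  state = __riscv_vaesdf_vv(state, __riscv_vle32_v_u32m1(rk, vl), vl);
+  __riscv_vse32_v_u32m1(block, state, vl);
+}
+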
+vuint32mf2_t __riscv_vaesdm_vs (vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl);
+vuint32m1_t __riscv_vaesdm_vs (vuint32m1_t vd, vuint32mf2_t vs2, size_t vl);
+vuint32m2_t __riscv_vaesdm_vs (vuint32m2_t vd, vuint32mf2_t vs2, size_t vl);
+vuint32m4_t __riscv_vaesdm_vs (vuint32m4_t vd, vuint32mf2_t vs2, size_t vl);
+vuint32m8_t __riscv_vaesdm_vs (vuint32m8_t vd, vuint32mf2_t vs2, size_t vl);
+vuint32m1_t __riscv_vaesdm_vv (vuint32m1_t vd, vuint32m1_t vs2, size_t vl);
+vuint32m1_t __riscv_vaesdm_vs (vuint32m1_t vd, vuint32m1_t vs2, size_t vl);
+vuint32m2_t __riscv_vaesdm_vs (vuint32m2_t vd, vuint32m1_t vs2, size_t vl);
+vuint32m4_t __riscv_vaesdm_vs (vuint32m4_t vd, vuint32m1_t vs2, size_t vl);
+vuint32m8_t __riscv_vaesdm_vs (vuint32m8_t vd, vuint32m1_t vs2, size_t vl);
+vuint32m2_t __riscv_vaesdm_vv (vuint32m2_t vd, vuint32m2_t vs2, size_t vl);
+vuint32m2_t __riscv_vaesdm_vs (vuint32m2_t vd, vuint32m2_t vs2, size_t vl);
+vuint32m4_t __riscv_vaesdm_vs (vuint32m4_t vd, vuint32m2_t vs2, size_t vl);
+vuint32m8_t __riscv_vaesdm_vs (vuint32m8_t vd, vuint32m2_t vs2, size_t vl);
+vuint32m4_t __riscv_vaesdm_vv (vuint32m4_t vd, vuint32m4_t vs2, size_t vl);
+vuint32m4_t __riscv_vaesdm_vs (vuint32m4_t vd, vuint32m4_t vs2, size_t vl);
+vuint32m8_t __riscv_vaesdm_vs (vuint32m8_t vd, vuint32m4_t vs2, size_t vl);
+vuint32m8_t __riscv_vaesdm_vv (vuint32m8_t vd, vuint32m8_t vs2, size_t vl);
+----
+
+[[overloaded-]]
+==== Vector AES-128 Forward KeySchedule generation
+
+[,c]
+----
+vuint32mf2_t __riscv_vaeskf1 (vuint32mf2_t vs2, size_t uimm, size_t vl);
+vuint32m1_t __riscv_vaeskf1 (vuint32m1_t vs2, size_t uimm, size_t vl);
+vuint32m2_t __riscv_vaeskf1 (vuint32m2_t vs2, size_t uimm, size_t vl);
+vuint32m4_t __riscv_vaeskf1 (vuint32m4_t vs2, size_t uimm, size_t vl);
+vuint32m8_t __riscv_vaeskf1 (vuint32m8_t vs2, size_t uimm, size_t vl);
+vuint32mf2_t __riscv_vaeskf2 (vuint32mf2_t vd, vuint32mf2_t vs2, size_t uimm, size_t vl);
+vuint32m1_t __riscv_vaeskf2 (vuint32m1_t vd, vuint32m1_t vs2, size_t uimm, size_t vl);
+vuint32m2_t __riscv_vaeskf2 (vuint32m2_t vd, vuint32m2_t vs2, size_t uimm, size_t vl);
+vuint32m4_t __riscv_vaeskf2 (vuint32m4_t vd, vuint32m4_t vs2, size_t uimm, size_t vl);
+vuint32m8_t __riscv_vaeskf2 (vuint32m8_t vd, vuint32m8_t vs2, size_t uimm, size_t vl);
+----
+
+[[overloaded-]]
+==== Vector AES round zero
+
+[,c]
+----
+vuint32mf2_t __riscv_vaesz (vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl);
+vuint32m1_t __riscv_vaesz (vuint32m1_t vd, vuint32mf2_t vs2, size_t vl);
+vuint32m2_t __riscv_vaesz (vuint32m2_t vd, vuint32mf2_t vs2, size_t vl);
+vuint32m4_t __riscv_vaesz (vuint32m4_t vd, vuint32mf2_t vs2, size_t vl);
+vuint32m8_t __riscv_vaesz (vuint32m8_t vd, vuint32mf2_t vs2, size_t vl);
+vuint32m1_t __riscv_vaesz (vuint32m1_t vd, vuint32m1_t vs2, size_t vl);
+vuint32m2_t __riscv_vaesz (vuint32m2_t vd, vuint32m1_t vs2, size_t vl);
+vuint32m4_t __riscv_vaesz (vuint32m4_t vd, vuint32m1_t vs2, size_t vl);
+vuint32m8_t __riscv_vaesz (vuint32m8_t vd, vuint32m1_t vs2, size_t vl);
+vuint32m2_t __riscv_vaesz (vuint32m2_t vd, vuint32m2_t vs2, size_t vl);
+vuint32m4_t __riscv_vaesz (vuint32m4_t vd, vuint32m2_t vs2, size_t vl);
+vuint32m8_t __riscv_vaesz (vuint32m8_t vd, vuint32m2_t vs2, size_t vl);
+vuint32m4_t __riscv_vaesz (vuint32m4_t vd, vuint32m4_t vs2, size_t vl);
+vuint32m8_t __riscv_vaesz (vuint32m8_t vd, vuint32m4_t vs2, size_t vl);
+----
diff --git a/auto-generated/vector-crypto/overloaded_intrinsic_funcs/04_zvknh_-_nist_suite:_vector_sha-2_secure_hash.adoc b/auto-generated/vector-crypto/overloaded_intrinsic_funcs/04_zvknh_-_nist_suite:_vector_sha-2_secure_hash.adoc
new file mode 100644
index 000000000..0c818e28d
--- /dev/null
+++ b/auto-generated/vector-crypto/overloaded_intrinsic_funcs/04_zvknh_-_nist_suite:_vector_sha-2_secure_hash.adoc
@@ -0,0 +1,43 @@
+
+=== Zvknh - NIST Suite: Vector SHA-2 Secure Hash
+
+[[overloaded-]]
+==== Vector SHA-2 message schedule
+
+[,c]
+----
+vuint32mf2_t __riscv_vsha2ms (vuint32mf2_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl);
+vuint32m1_t __riscv_vsha2ms (vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl);
+vuint32m2_t __riscv_vsha2ms (vuint32m2_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl);
+vuint32m4_t __riscv_vsha2ms (vuint32m4_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl);
+vuint32m8_t __riscv_vsha2ms (vuint32m8_t vd, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl);
+vuint64m1_t __riscv_vsha2ms (vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl);
+vuint64m2_t __riscv_vsha2ms (vuint64m2_t vd, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl);
+vuint64m4_t __riscv_vsha2ms (vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl);
+vuint64m8_t __riscv_vsha2ms (vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl);
+----
+
+[[overloaded-]]
+==== Vector SHA-2 two rounds of compression
+
+[,c]
+----
+vuint32mf2_t __riscv_vsha2ch (vuint32mf2_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl);
+vuint32m1_t __riscv_vsha2ch (vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl);
+vuint32m2_t __riscv_vsha2ch (vuint32m2_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl);
+vuint32m4_t __riscv_vsha2ch (vuint32m4_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl);
+vuint32m8_t __riscv_vsha2ch (vuint32m8_t vd, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl);
+vuint64m1_t __riscv_vsha2ch (vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl);
+vuint64m2_t __riscv_vsha2ch (vuint64m2_t vd, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl);
+vuint64m4_t __riscv_vsha2ch (vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl);
+vuint64m8_t __riscv_vsha2ch (vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl);
+vuint32mf2_t __riscv_vsha2cl (vuint32mf2_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl);
+vuint32m1_t __riscv_vsha2cl (vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl);
+vuint32m2_t __riscv_vsha2cl (vuint32m2_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl);
+vuint32m4_t __riscv_vsha2cl (vuint32m4_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl);
+vuint32m8_t __riscv_vsha2cl (vuint32m8_t vd, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl);
+vuint64m1_t __riscv_vsha2cl (vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl);
+vuint64m2_t __riscv_vsha2cl (vuint64m2_t vd, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl);
+vuint64m4_t __riscv_vsha2cl (vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl);
+vuint64m8_t __riscv_vsha2cl (vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl);
+----
diff --git a/auto-generated/vector-crypto/overloaded_intrinsic_funcs/05_zvksed_-_shangmi_suite:_sm4_block_cipher.adoc b/auto-generated/vector-crypto/overloaded_intrinsic_funcs/05_zvksed_-_shangmi_suite:_sm4_block_cipher.adoc
new file mode 100644
index 000000000..f5ad8d8fa
--- /dev/null
+++ b/auto-generated/vector-crypto/overloaded_intrinsic_funcs/05_zvksed_-_shangmi_suite:_sm4_block_cipher.adoc
@@ -0,0 +1,40 @@
+
+=== Zvksed - ShangMi Suite: SM4 Block Cipher
+
+[[overloaded-]]
+==== Vector SM4 KeyExpansion
+
+[,c]
+----
+vuint32mf2_t __riscv_vsm4k (vuint32mf2_t vs2, size_t uimm, size_t vl);
+vuint32m1_t __riscv_vsm4k (vuint32m1_t vs2, size_t uimm, size_t vl);
+vuint32m2_t __riscv_vsm4k (vuint32m2_t vs2, size_t uimm, size_t vl);
+vuint32m4_t __riscv_vsm4k (vuint32m4_t vs2, size_t uimm, size_t vl);
+vuint32m8_t __riscv_vsm4k (vuint32m8_t vs2, size_t uimm, size_t vl);
+----
+
+[[overloaded-]]
+==== Vector SM4 Rounds
+
+[,c]
+----
+vuint32mf2_t __riscv_vsm4r_vv (vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl);
+vuint32mf2_t __riscv_vsm4r_vs (vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl);
+vuint32m1_t __riscv_vsm4r_vs (vuint32m1_t vd, vuint32mf2_t vs2, size_t vl);
+vuint32m2_t __riscv_vsm4r_vs (vuint32m2_t vd, vuint32mf2_t vs2, size_t vl);
+vuint32m4_t __riscv_vsm4r_vs (vuint32m4_t vd, vuint32mf2_t vs2, size_t vl);
+vuint32m8_t __riscv_vsm4r_vs (vuint32m8_t vd, vuint32mf2_t vs2, size_t vl);
+vuint32m1_t __riscv_vsm4r_vv (vuint32m1_t vd, vuint32m1_t vs2, size_t vl);
+vuint32m1_t __riscv_vsm4r_vs (vuint32m1_t vd, vuint32m1_t vs2, size_t vl);
+vuint32m2_t __riscv_vsm4r_vs (vuint32m2_t vd, vuint32m1_t vs2, size_t vl);
+vuint32m4_t __riscv_vsm4r_vs (vuint32m4_t vd, vuint32m1_t vs2, size_t vl);
+vuint32m8_t __riscv_vsm4r_vs (vuint32m8_t vd, vuint32m1_t vs2, size_t vl);
+vuint32m2_t __riscv_vsm4r_vv (vuint32m2_t vd, vuint32m2_t vs2, size_t vl);
+vuint32m2_t __riscv_vsm4r_vs (vuint32m2_t vd, vuint32m2_t vs2, size_t vl);
+vuint32m4_t __riscv_vsm4r_vs (vuint32m4_t vd, vuint32m2_t vs2, size_t vl);
+vuint32m8_t __riscv_vsm4r_vs (vuint32m8_t vd, vuint32m2_t vs2, size_t vl);
+vuint32m4_t __riscv_vsm4r_vv (vuint32m4_t vd, vuint32m4_t vs2, size_t vl);
+vuint32m4_t __riscv_vsm4r_vs (vuint32m4_t vd, vuint32m4_t vs2, size_t vl);
+vuint32m8_t __riscv_vsm4r_vs (vuint32m8_t vd, vuint32m4_t vs2, size_t vl);
+vuint32m8_t __riscv_vsm4r_vv (vuint32m8_t vd, vuint32m8_t vs2, size_t vl);
+----
diff --git a/auto-generated/vector-crypto/overloaded_intrinsic_funcs/06_zvksh_-_shangmi_suite:_sm3_secure_hash.adoc b/auto-generated/vector-crypto/overloaded_intrinsic_funcs/06_zvksh_-_shangmi_suite:_sm3_secure_hash.adoc
new file mode 100644
index 000000000..ddf0b441c
--- /dev/null
+++ b/auto-generated/vector-crypto/overloaded_intrinsic_funcs/06_zvksh_-_shangmi_suite:_sm3_secure_hash.adoc
@@ -0,0 +1,26 @@
+
+=== Zvksh - ShangMi Suite: SM3 Secure Hash
+
+[[overloaded-]]
+==== Vector SM3 Message Expansion
+
+[,c]
+----
+vuint32mf2_t __riscv_vsm3me (vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl);
+vuint32m1_t __riscv_vsm3me (vuint32m1_t vs2, vuint32m1_t vs1, size_t vl);
+vuint32m2_t __riscv_vsm3me (vuint32m2_t vs2, vuint32m2_t vs1, size_t vl);
+vuint32m4_t __riscv_vsm3me (vuint32m4_t vs2, vuint32m4_t vs1, size_t vl);
+vuint32m8_t __riscv_vsm3me (vuint32m8_t vs2, vuint32m8_t vs1, size_t vl);
+----
+
+[[overloaded-]]
+==== Vector SM3 Compression
+
+[,c]
+----
+vuint32mf2_t __riscv_vsm3c (vuint32mf2_t vd, vuint32mf2_t vs2, size_t uimm, size_t vl);
+vuint32m1_t __riscv_vsm3c (vuint32m1_t vd, vuint32m1_t vs2, size_t uimm, size_t vl);
+vuint32m2_t __riscv_vsm3c (vuint32m2_t vd, vuint32m2_t vs2, size_t uimm, size_t vl);
+vuint32m4_t __riscv_vsm3c (vuint32m4_t vd, vuint32m4_t vs2, size_t uimm, size_t vl);
+vuint32m8_t __riscv_vsm3c (vuint32m8_t vd, vuint32m8_t vs2, size_t uimm, size_t vl);
+----
diff --git a/auto-generated/vector-crypto/policy_funcs/api-testing/vaesdf.c b/auto-generated/vector-crypto/policy_funcs/api-testing/vaesdf.c
new file mode 100644
index 000000000..43eef93e8
--- /dev/null
+++ b/auto-generated/vector-crypto/policy_funcs/api-testing/vaesdf.c
@@ -0,0 +1,78
@@ +#include +#include + +vuint32mf2_t test_vaesdf_vv_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) { + return __riscv_vaesdf_vv_u32mf2_tu(vd, vs2, vl); +} + +vuint32mf2_t test_vaesdf_vs_u32mf2_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) { + return __riscv_vaesdf_vs_u32mf2_u32mf2_tu(vd, vs2, vl); +} + +vuint32m1_t test_vaesdf_vs_u32mf2_u32m1_tu(vuint32m1_t vd, vuint32mf2_t vs2, size_t vl) { + return __riscv_vaesdf_vs_u32mf2_u32m1_tu(vd, vs2, vl); +} + +vuint32m2_t test_vaesdf_vs_u32mf2_u32m2_tu(vuint32m2_t vd, vuint32mf2_t vs2, size_t vl) { + return __riscv_vaesdf_vs_u32mf2_u32m2_tu(vd, vs2, vl); +} + +vuint32m4_t test_vaesdf_vs_u32mf2_u32m4_tu(vuint32m4_t vd, vuint32mf2_t vs2, size_t vl) { + return __riscv_vaesdf_vs_u32mf2_u32m4_tu(vd, vs2, vl); +} + +vuint32m8_t test_vaesdf_vs_u32mf2_u32m8_tu(vuint32m8_t vd, vuint32mf2_t vs2, size_t vl) { + return __riscv_vaesdf_vs_u32mf2_u32m8_tu(vd, vs2, vl); +} + +vuint32m1_t test_vaesdf_vv_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vaesdf_vv_u32m1_tu(vd, vs2, vl); +} + +vuint32m1_t test_vaesdf_vs_u32m1_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vaesdf_vs_u32m1_u32m1_tu(vd, vs2, vl); +} + +vuint32m2_t test_vaesdf_vs_u32m1_u32m2_tu(vuint32m2_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vaesdf_vs_u32m1_u32m2_tu(vd, vs2, vl); +} + +vuint32m4_t test_vaesdf_vs_u32m1_u32m4_tu(vuint32m4_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vaesdf_vs_u32m1_u32m4_tu(vd, vs2, vl); +} + +vuint32m8_t test_vaesdf_vs_u32m1_u32m8_tu(vuint32m8_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vaesdf_vs_u32m1_u32m8_tu(vd, vs2, vl); +} + +vuint32m2_t test_vaesdf_vv_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vaesdf_vv_u32m2_tu(vd, vs2, vl); +} + +vuint32m2_t test_vaesdf_vs_u32m2_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vaesdf_vs_u32m2_u32m2_tu(vd, vs2, vl); +} + +vuint32m4_t test_vaesdf_vs_u32m2_u32m4_tu(vuint32m4_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vaesdf_vs_u32m2_u32m4_tu(vd, vs2, vl); +} + +vuint32m8_t test_vaesdf_vs_u32m2_u32m8_tu(vuint32m8_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vaesdf_vs_u32m2_u32m8_tu(vd, vs2, vl); +} + +vuint32m4_t test_vaesdf_vv_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vaesdf_vv_u32m4_tu(vd, vs2, vl); +} + +vuint32m4_t test_vaesdf_vs_u32m4_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vaesdf_vs_u32m4_u32m4_tu(vd, vs2, vl); +} + +vuint32m8_t test_vaesdf_vs_u32m4_u32m8_tu(vuint32m8_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vaesdf_vs_u32m4_u32m8_tu(vd, vs2, vl); +} + +vuint32m8_t test_vaesdf_vv_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { + return __riscv_vaesdf_vv_u32m8_tu(vd, vs2, vl); +} diff --git a/auto-generated/vector-crypto/policy_funcs/api-testing/vaesdm.c b/auto-generated/vector-crypto/policy_funcs/api-testing/vaesdm.c new file mode 100644 index 000000000..3c1d89651 --- /dev/null +++ b/auto-generated/vector-crypto/policy_funcs/api-testing/vaesdm.c @@ -0,0 +1,78 @@ +#include +#include + +vuint32mf2_t test_vaesdm_vv_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) { + return __riscv_vaesdm_vv_u32mf2_tu(vd, vs2, vl); +} + +vuint32mf2_t test_vaesdm_vs_u32mf2_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) { + return __riscv_vaesdm_vs_u32mf2_u32mf2_tu(vd, vs2, vl); +} + +vuint32m1_t test_vaesdm_vs_u32mf2_u32m1_tu(vuint32m1_t vd, vuint32mf2_t vs2, size_t vl) { + return 
__riscv_vaesdm_vs_u32mf2_u32m1_tu(vd, vs2, vl); +} + +vuint32m2_t test_vaesdm_vs_u32mf2_u32m2_tu(vuint32m2_t vd, vuint32mf2_t vs2, size_t vl) { + return __riscv_vaesdm_vs_u32mf2_u32m2_tu(vd, vs2, vl); +} + +vuint32m4_t test_vaesdm_vs_u32mf2_u32m4_tu(vuint32m4_t vd, vuint32mf2_t vs2, size_t vl) { + return __riscv_vaesdm_vs_u32mf2_u32m4_tu(vd, vs2, vl); +} + +vuint32m8_t test_vaesdm_vs_u32mf2_u32m8_tu(vuint32m8_t vd, vuint32mf2_t vs2, size_t vl) { + return __riscv_vaesdm_vs_u32mf2_u32m8_tu(vd, vs2, vl); +} + +vuint32m1_t test_vaesdm_vv_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vaesdm_vv_u32m1_tu(vd, vs2, vl); +} + +vuint32m1_t test_vaesdm_vs_u32m1_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vaesdm_vs_u32m1_u32m1_tu(vd, vs2, vl); +} + +vuint32m2_t test_vaesdm_vs_u32m1_u32m2_tu(vuint32m2_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vaesdm_vs_u32m1_u32m2_tu(vd, vs2, vl); +} + +vuint32m4_t test_vaesdm_vs_u32m1_u32m4_tu(vuint32m4_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vaesdm_vs_u32m1_u32m4_tu(vd, vs2, vl); +} + +vuint32m8_t test_vaesdm_vs_u32m1_u32m8_tu(vuint32m8_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vaesdm_vs_u32m1_u32m8_tu(vd, vs2, vl); +} + +vuint32m2_t test_vaesdm_vv_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vaesdm_vv_u32m2_tu(vd, vs2, vl); +} + +vuint32m2_t test_vaesdm_vs_u32m2_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vaesdm_vs_u32m2_u32m2_tu(vd, vs2, vl); +} + +vuint32m4_t test_vaesdm_vs_u32m2_u32m4_tu(vuint32m4_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vaesdm_vs_u32m2_u32m4_tu(vd, vs2, vl); +} + +vuint32m8_t test_vaesdm_vs_u32m2_u32m8_tu(vuint32m8_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vaesdm_vs_u32m2_u32m8_tu(vd, vs2, vl); +} + +vuint32m4_t test_vaesdm_vv_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vaesdm_vv_u32m4_tu(vd, vs2, vl); +} + +vuint32m4_t test_vaesdm_vs_u32m4_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vaesdm_vs_u32m4_u32m4_tu(vd, vs2, vl); +} + +vuint32m8_t test_vaesdm_vs_u32m4_u32m8_tu(vuint32m8_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vaesdm_vs_u32m4_u32m8_tu(vd, vs2, vl); +} + +vuint32m8_t test_vaesdm_vv_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { + return __riscv_vaesdm_vv_u32m8_tu(vd, vs2, vl); +} diff --git a/auto-generated/vector-crypto/policy_funcs/api-testing/vaesef.c b/auto-generated/vector-crypto/policy_funcs/api-testing/vaesef.c new file mode 100644 index 000000000..1b82fcd8c --- /dev/null +++ b/auto-generated/vector-crypto/policy_funcs/api-testing/vaesef.c @@ -0,0 +1,78 @@ +#include +#include + +vuint32mf2_t test_vaesef_vv_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) { + return __riscv_vaesef_vv_u32mf2_tu(vd, vs2, vl); +} + +vuint32mf2_t test_vaesef_vs_u32mf2_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) { + return __riscv_vaesef_vs_u32mf2_u32mf2_tu(vd, vs2, vl); +} + +vuint32m1_t test_vaesef_vs_u32mf2_u32m1_tu(vuint32m1_t vd, vuint32mf2_t vs2, size_t vl) { + return __riscv_vaesef_vs_u32mf2_u32m1_tu(vd, vs2, vl); +} + +vuint32m2_t test_vaesef_vs_u32mf2_u32m2_tu(vuint32m2_t vd, vuint32mf2_t vs2, size_t vl) { + return __riscv_vaesef_vs_u32mf2_u32m2_tu(vd, vs2, vl); +} + +vuint32m4_t test_vaesef_vs_u32mf2_u32m4_tu(vuint32m4_t vd, vuint32mf2_t vs2, size_t vl) { + return __riscv_vaesef_vs_u32mf2_u32m4_tu(vd, vs2, vl); +} + +vuint32m8_t test_vaesef_vs_u32mf2_u32m8_tu(vuint32m8_t vd, vuint32mf2_t 
vs2, size_t vl) { + return __riscv_vaesef_vs_u32mf2_u32m8_tu(vd, vs2, vl); +} + +vuint32m1_t test_vaesef_vv_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vaesef_vv_u32m1_tu(vd, vs2, vl); +} + +vuint32m1_t test_vaesef_vs_u32m1_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vaesef_vs_u32m1_u32m1_tu(vd, vs2, vl); +} + +vuint32m2_t test_vaesef_vs_u32m1_u32m2_tu(vuint32m2_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vaesef_vs_u32m1_u32m2_tu(vd, vs2, vl); +} + +vuint32m4_t test_vaesef_vs_u32m1_u32m4_tu(vuint32m4_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vaesef_vs_u32m1_u32m4_tu(vd, vs2, vl); +} + +vuint32m8_t test_vaesef_vs_u32m1_u32m8_tu(vuint32m8_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vaesef_vs_u32m1_u32m8_tu(vd, vs2, vl); +} + +vuint32m2_t test_vaesef_vv_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vaesef_vv_u32m2_tu(vd, vs2, vl); +} + +vuint32m2_t test_vaesef_vs_u32m2_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vaesef_vs_u32m2_u32m2_tu(vd, vs2, vl); +} + +vuint32m4_t test_vaesef_vs_u32m2_u32m4_tu(vuint32m4_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vaesef_vs_u32m2_u32m4_tu(vd, vs2, vl); +} + +vuint32m8_t test_vaesef_vs_u32m2_u32m8_tu(vuint32m8_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vaesef_vs_u32m2_u32m8_tu(vd, vs2, vl); +} + +vuint32m4_t test_vaesef_vv_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vaesef_vv_u32m4_tu(vd, vs2, vl); +} + +vuint32m4_t test_vaesef_vs_u32m4_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vaesef_vs_u32m4_u32m4_tu(vd, vs2, vl); +} + +vuint32m8_t test_vaesef_vs_u32m4_u32m8_tu(vuint32m8_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vaesef_vs_u32m4_u32m8_tu(vd, vs2, vl); +} + +vuint32m8_t test_vaesef_vv_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { + return __riscv_vaesef_vv_u32m8_tu(vd, vs2, vl); +} diff --git a/auto-generated/vector-crypto/policy_funcs/api-testing/vaesem.c b/auto-generated/vector-crypto/policy_funcs/api-testing/vaesem.c new file mode 100644 index 000000000..1db0f1bda --- /dev/null +++ b/auto-generated/vector-crypto/policy_funcs/api-testing/vaesem.c @@ -0,0 +1,78 @@ +#include +#include + +vuint32mf2_t test_vaesem_vv_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) { + return __riscv_vaesem_vv_u32mf2_tu(vd, vs2, vl); +} + +vuint32mf2_t test_vaesem_vs_u32mf2_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) { + return __riscv_vaesem_vs_u32mf2_u32mf2_tu(vd, vs2, vl); +} + +vuint32m1_t test_vaesem_vs_u32mf2_u32m1_tu(vuint32m1_t vd, vuint32mf2_t vs2, size_t vl) { + return __riscv_vaesem_vs_u32mf2_u32m1_tu(vd, vs2, vl); +} + +vuint32m2_t test_vaesem_vs_u32mf2_u32m2_tu(vuint32m2_t vd, vuint32mf2_t vs2, size_t vl) { + return __riscv_vaesem_vs_u32mf2_u32m2_tu(vd, vs2, vl); +} + +vuint32m4_t test_vaesem_vs_u32mf2_u32m4_tu(vuint32m4_t vd, vuint32mf2_t vs2, size_t vl) { + return __riscv_vaesem_vs_u32mf2_u32m4_tu(vd, vs2, vl); +} + +vuint32m8_t test_vaesem_vs_u32mf2_u32m8_tu(vuint32m8_t vd, vuint32mf2_t vs2, size_t vl) { + return __riscv_vaesem_vs_u32mf2_u32m8_tu(vd, vs2, vl); +} + +vuint32m1_t test_vaesem_vv_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vaesem_vv_u32m1_tu(vd, vs2, vl); +} + +vuint32m1_t test_vaesem_vs_u32m1_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vaesem_vs_u32m1_u32m1_tu(vd, vs2, vl); +} + +vuint32m2_t test_vaesem_vs_u32m1_u32m2_tu(vuint32m2_t vd, 
vuint32m1_t vs2, size_t vl) { + return __riscv_vaesem_vs_u32m1_u32m2_tu(vd, vs2, vl); +} + +vuint32m4_t test_vaesem_vs_u32m1_u32m4_tu(vuint32m4_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vaesem_vs_u32m1_u32m4_tu(vd, vs2, vl); +} + +vuint32m8_t test_vaesem_vs_u32m1_u32m8_tu(vuint32m8_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vaesem_vs_u32m1_u32m8_tu(vd, vs2, vl); +} + +vuint32m2_t test_vaesem_vv_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vaesem_vv_u32m2_tu(vd, vs2, vl); +} + +vuint32m2_t test_vaesem_vs_u32m2_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vaesem_vs_u32m2_u32m2_tu(vd, vs2, vl); +} + +vuint32m4_t test_vaesem_vs_u32m2_u32m4_tu(vuint32m4_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vaesem_vs_u32m2_u32m4_tu(vd, vs2, vl); +} + +vuint32m8_t test_vaesem_vs_u32m2_u32m8_tu(vuint32m8_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vaesem_vs_u32m2_u32m8_tu(vd, vs2, vl); +} + +vuint32m4_t test_vaesem_vv_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vaesem_vv_u32m4_tu(vd, vs2, vl); +} + +vuint32m4_t test_vaesem_vs_u32m4_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vaesem_vs_u32m4_u32m4_tu(vd, vs2, vl); +} + +vuint32m8_t test_vaesem_vs_u32m4_u32m8_tu(vuint32m8_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vaesem_vs_u32m4_u32m8_tu(vd, vs2, vl); +} + +vuint32m8_t test_vaesem_vv_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { + return __riscv_vaesem_vv_u32m8_tu(vd, vs2, vl); +} diff --git a/auto-generated/vector-crypto/policy_funcs/api-testing/vaeskf1.c b/auto-generated/vector-crypto/policy_funcs/api-testing/vaeskf1.c new file mode 100644 index 000000000..4bbd0fb10 --- /dev/null +++ b/auto-generated/vector-crypto/policy_funcs/api-testing/vaeskf1.c @@ -0,0 +1,22 @@ +#include +#include + +vuint32mf2_t test_vaeskf1_vi_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) { + return __riscv_vaeskf1_vi_u32mf2_tu(vd, vs2, 0, vl); +} + +vuint32m1_t test_vaeskf1_vi_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vaeskf1_vi_u32m1_tu(vd, vs2, 0, vl); +} + +vuint32m2_t test_vaeskf1_vi_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vaeskf1_vi_u32m2_tu(vd, vs2, 0, vl); +} + +vuint32m4_t test_vaeskf1_vi_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vaeskf1_vi_u32m4_tu(vd, vs2, 0, vl); +} + +vuint32m8_t test_vaeskf1_vi_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { + return __riscv_vaeskf1_vi_u32m8_tu(vd, vs2, 0, vl); +} diff --git a/auto-generated/vector-crypto/policy_funcs/api-testing/vaeskf2.c b/auto-generated/vector-crypto/policy_funcs/api-testing/vaeskf2.c new file mode 100644 index 000000000..30150c660 --- /dev/null +++ b/auto-generated/vector-crypto/policy_funcs/api-testing/vaeskf2.c @@ -0,0 +1,22 @@ +#include +#include + +vuint32mf2_t test_vaeskf2_vi_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) { + return __riscv_vaeskf2_vi_u32mf2_tu(vd, vs2, 0, vl); +} + +vuint32m1_t test_vaeskf2_vi_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vaeskf2_vi_u32m1_tu(vd, vs2, 0, vl); +} + +vuint32m2_t test_vaeskf2_vi_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vaeskf2_vi_u32m2_tu(vd, vs2, 0, vl); +} + +vuint32m4_t test_vaeskf2_vi_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vaeskf2_vi_u32m4_tu(vd, vs2, 0, vl); +} + +vuint32m8_t test_vaeskf2_vi_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, size_t 
vl) { + return __riscv_vaeskf2_vi_u32m8_tu(vd, vs2, 0, vl); +} diff --git a/auto-generated/vector-crypto/policy_funcs/api-testing/vaesz.c b/auto-generated/vector-crypto/policy_funcs/api-testing/vaesz.c new file mode 100644 index 000000000..25486191d --- /dev/null +++ b/auto-generated/vector-crypto/policy_funcs/api-testing/vaesz.c @@ -0,0 +1,58 @@ +#include +#include + +vuint32mf2_t test_vaesz_vs_u32mf2_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) { + return __riscv_vaesz_vs_u32mf2_u32mf2_tu(vd, vs2, vl); +} + +vuint32m1_t test_vaesz_vs_u32mf2_u32m1_tu(vuint32m1_t vd, vuint32mf2_t vs2, size_t vl) { + return __riscv_vaesz_vs_u32mf2_u32m1_tu(vd, vs2, vl); +} + +vuint32m2_t test_vaesz_vs_u32mf2_u32m2_tu(vuint32m2_t vd, vuint32mf2_t vs2, size_t vl) { + return __riscv_vaesz_vs_u32mf2_u32m2_tu(vd, vs2, vl); +} + +vuint32m4_t test_vaesz_vs_u32mf2_u32m4_tu(vuint32m4_t vd, vuint32mf2_t vs2, size_t vl) { + return __riscv_vaesz_vs_u32mf2_u32m4_tu(vd, vs2, vl); +} + +vuint32m8_t test_vaesz_vs_u32mf2_u32m8_tu(vuint32m8_t vd, vuint32mf2_t vs2, size_t vl) { + return __riscv_vaesz_vs_u32mf2_u32m8_tu(vd, vs2, vl); +} + +vuint32m1_t test_vaesz_vs_u32m1_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vaesz_vs_u32m1_u32m1_tu(vd, vs2, vl); +} + +vuint32m2_t test_vaesz_vs_u32m1_u32m2_tu(vuint32m2_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vaesz_vs_u32m1_u32m2_tu(vd, vs2, vl); +} + +vuint32m4_t test_vaesz_vs_u32m1_u32m4_tu(vuint32m4_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vaesz_vs_u32m1_u32m4_tu(vd, vs2, vl); +} + +vuint32m8_t test_vaesz_vs_u32m1_u32m8_tu(vuint32m8_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vaesz_vs_u32m1_u32m8_tu(vd, vs2, vl); +} + +vuint32m2_t test_vaesz_vs_u32m2_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vaesz_vs_u32m2_u32m2_tu(vd, vs2, vl); +} + +vuint32m4_t test_vaesz_vs_u32m2_u32m4_tu(vuint32m4_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vaesz_vs_u32m2_u32m4_tu(vd, vs2, vl); +} + +vuint32m8_t test_vaesz_vs_u32m2_u32m8_tu(vuint32m8_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vaesz_vs_u32m2_u32m8_tu(vd, vs2, vl); +} + +vuint32m4_t test_vaesz_vs_u32m4_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vaesz_vs_u32m4_u32m4_tu(vd, vs2, vl); +} + +vuint32m8_t test_vaesz_vs_u32m4_u32m8_tu(vuint32m8_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vaesz_vs_u32m4_u32m8_tu(vd, vs2, vl); +} diff --git a/auto-generated/vector-crypto/policy_funcs/api-testing/vandn.c b/auto-generated/vector-crypto/policy_funcs/api-testing/vandn.c new file mode 100644 index 000000000..786635b20 --- /dev/null +++ b/auto-generated/vector-crypto/policy_funcs/api-testing/vandn.c @@ -0,0 +1,706 @@ +#include +#include + +vuint8mf8_t test_vandn_vv_u8mf8_tu(vuint8mf8_t vd, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { + return __riscv_vandn_vv_u8mf8_tu(vd, vs2, vs1, vl); +} + +vuint8mf8_t test_vandn_vx_u8mf8_tu(vuint8mf8_t vd, vuint8mf8_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vandn_vx_u8mf8_tu(vd, vs2, rs1, vl); +} + +vuint8mf4_t test_vandn_vv_u8mf4_tu(vuint8mf4_t vd, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { + return __riscv_vandn_vv_u8mf4_tu(vd, vs2, vs1, vl); +} + +vuint8mf4_t test_vandn_vx_u8mf4_tu(vuint8mf4_t vd, vuint8mf4_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vandn_vx_u8mf4_tu(vd, vs2, rs1, vl); +} + +vuint8mf2_t test_vandn_vv_u8mf2_tu(vuint8mf2_t vd, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { + return __riscv_vandn_vv_u8mf2_tu(vd, vs2, vs1, vl); +} + 
+vuint8mf2_t test_vandn_vx_u8mf2_tu(vuint8mf2_t vd, vuint8mf2_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vandn_vx_u8mf2_tu(vd, vs2, rs1, vl); +} + +vuint8m1_t test_vandn_vv_u8m1_tu(vuint8m1_t vd, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vandn_vv_u8m1_tu(vd, vs2, vs1, vl); +} + +vuint8m1_t test_vandn_vx_u8m1_tu(vuint8m1_t vd, vuint8m1_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vandn_vx_u8m1_tu(vd, vs2, rs1, vl); +} + +vuint8m2_t test_vandn_vv_u8m2_tu(vuint8m2_t vd, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) { + return __riscv_vandn_vv_u8m2_tu(vd, vs2, vs1, vl); +} + +vuint8m2_t test_vandn_vx_u8m2_tu(vuint8m2_t vd, vuint8m2_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vandn_vx_u8m2_tu(vd, vs2, rs1, vl); +} + +vuint8m4_t test_vandn_vv_u8m4_tu(vuint8m4_t vd, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) { + return __riscv_vandn_vv_u8m4_tu(vd, vs2, vs1, vl); +} + +vuint8m4_t test_vandn_vx_u8m4_tu(vuint8m4_t vd, vuint8m4_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vandn_vx_u8m4_tu(vd, vs2, rs1, vl); +} + +vuint8m8_t test_vandn_vv_u8m8_tu(vuint8m8_t vd, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) { + return __riscv_vandn_vv_u8m8_tu(vd, vs2, vs1, vl); +} + +vuint8m8_t test_vandn_vx_u8m8_tu(vuint8m8_t vd, vuint8m8_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vandn_vx_u8m8_tu(vd, vs2, rs1, vl); +} + +vuint16mf4_t test_vandn_vv_u16mf4_tu(vuint16mf4_t vd, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { + return __riscv_vandn_vv_u16mf4_tu(vd, vs2, vs1, vl); +} + +vuint16mf4_t test_vandn_vx_u16mf4_tu(vuint16mf4_t vd, vuint16mf4_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vandn_vx_u16mf4_tu(vd, vs2, rs1, vl); +} + +vuint16mf2_t test_vandn_vv_u16mf2_tu(vuint16mf2_t vd, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { + return __riscv_vandn_vv_u16mf2_tu(vd, vs2, vs1, vl); +} + +vuint16mf2_t test_vandn_vx_u16mf2_tu(vuint16mf2_t vd, vuint16mf2_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vandn_vx_u16mf2_tu(vd, vs2, rs1, vl); +} + +vuint16m1_t test_vandn_vv_u16m1_tu(vuint16m1_t vd, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vandn_vv_u16m1_tu(vd, vs2, vs1, vl); +} + +vuint16m1_t test_vandn_vx_u16m1_tu(vuint16m1_t vd, vuint16m1_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vandn_vx_u16m1_tu(vd, vs2, rs1, vl); +} + +vuint16m2_t test_vandn_vv_u16m2_tu(vuint16m2_t vd, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { + return __riscv_vandn_vv_u16m2_tu(vd, vs2, vs1, vl); +} + +vuint16m2_t test_vandn_vx_u16m2_tu(vuint16m2_t vd, vuint16m2_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vandn_vx_u16m2_tu(vd, vs2, rs1, vl); +} + +vuint16m4_t test_vandn_vv_u16m4_tu(vuint16m4_t vd, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { + return __riscv_vandn_vv_u16m4_tu(vd, vs2, vs1, vl); +} + +vuint16m4_t test_vandn_vx_u16m4_tu(vuint16m4_t vd, vuint16m4_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vandn_vx_u16m4_tu(vd, vs2, rs1, vl); +} + +vuint16m8_t test_vandn_vv_u16m8_tu(vuint16m8_t vd, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) { + return __riscv_vandn_vv_u16m8_tu(vd, vs2, vs1, vl); +} + +vuint16m8_t test_vandn_vx_u16m8_tu(vuint16m8_t vd, vuint16m8_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vandn_vx_u16m8_tu(vd, vs2, rs1, vl); +} + +vuint32mf2_t test_vandn_vv_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { + return __riscv_vandn_vv_u32mf2_tu(vd, vs2, vs1, vl); +} + +vuint32mf2_t test_vandn_vx_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, uint32_t rs1, size_t vl) { + return 
__riscv_vandn_vx_u32mf2_tu(vd, vs2, rs1, vl); +} + +vuint32m1_t test_vandn_vv_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vandn_vv_u32m1_tu(vd, vs2, vs1, vl); +} + +vuint32m1_t test_vandn_vx_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vandn_vx_u32m1_tu(vd, vs2, rs1, vl); +} + +vuint32m2_t test_vandn_vv_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { + return __riscv_vandn_vv_u32m2_tu(vd, vs2, vs1, vl); +} + +vuint32m2_t test_vandn_vx_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vandn_vx_u32m2_tu(vd, vs2, rs1, vl); +} + +vuint32m4_t test_vandn_vv_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { + return __riscv_vandn_vv_u32m4_tu(vd, vs2, vs1, vl); +} + +vuint32m4_t test_vandn_vx_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vandn_vx_u32m4_tu(vd, vs2, rs1, vl); +} + +vuint32m8_t test_vandn_vv_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) { + return __riscv_vandn_vv_u32m8_tu(vd, vs2, vs1, vl); +} + +vuint32m8_t test_vandn_vx_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vandn_vx_u32m8_tu(vd, vs2, rs1, vl); +} + +vuint64m1_t test_vandn_vv_u64m1_tu(vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vandn_vv_u64m1_tu(vd, vs2, vs1, vl); +} + +vuint64m1_t test_vandn_vx_u64m1_tu(vuint64m1_t vd, vuint64m1_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vandn_vx_u64m1_tu(vd, vs2, rs1, vl); +} + +vuint64m2_t test_vandn_vv_u64m2_tu(vuint64m2_t vd, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { + return __riscv_vandn_vv_u64m2_tu(vd, vs2, vs1, vl); +} + +vuint64m2_t test_vandn_vx_u64m2_tu(vuint64m2_t vd, vuint64m2_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vandn_vx_u64m2_tu(vd, vs2, rs1, vl); +} + +vuint64m4_t test_vandn_vv_u64m4_tu(vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { + return __riscv_vandn_vv_u64m4_tu(vd, vs2, vs1, vl); +} + +vuint64m4_t test_vandn_vx_u64m4_tu(vuint64m4_t vd, vuint64m4_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vandn_vx_u64m4_tu(vd, vs2, rs1, vl); +} + +vuint64m8_t test_vandn_vv_u64m8_tu(vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { + return __riscv_vandn_vv_u64m8_tu(vd, vs2, vs1, vl); +} + +vuint64m8_t test_vandn_vx_u64m8_tu(vuint64m8_t vd, vuint64m8_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vandn_vx_u64m8_tu(vd, vs2, rs1, vl); +} + +vuint8mf8_t test_vandn_vv_u8mf8_tum(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { + return __riscv_vandn_vv_u8mf8_tum(vm, vd, vs2, vs1, vl); +} + +vuint8mf8_t test_vandn_vx_u8mf8_tum(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vandn_vx_u8mf8_tum(vm, vd, vs2, rs1, vl); +} + +vuint8mf4_t test_vandn_vv_u8mf4_tum(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { + return __riscv_vandn_vv_u8mf4_tum(vm, vd, vs2, vs1, vl); +} + +vuint8mf4_t test_vandn_vx_u8mf4_tum(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vandn_vx_u8mf4_tum(vm, vd, vs2, rs1, vl); +} + +vuint8mf2_t test_vandn_vv_u8mf2_tum(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { + return __riscv_vandn_vv_u8mf2_tum(vm, vd, vs2, vs1, vl); +} + +vuint8mf2_t test_vandn_vx_u8mf2_tum(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, uint8_t rs1, size_t vl) { + 
return __riscv_vandn_vx_u8mf2_tum(vm, vd, vs2, rs1, vl); +} + +vuint8m1_t test_vandn_vv_u8m1_tum(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vandn_vv_u8m1_tum(vm, vd, vs2, vs1, vl); +} + +vuint8m1_t test_vandn_vx_u8m1_tum(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vandn_vx_u8m1_tum(vm, vd, vs2, rs1, vl); +} + +vuint8m2_t test_vandn_vv_u8m2_tum(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) { + return __riscv_vandn_vv_u8m2_tum(vm, vd, vs2, vs1, vl); +} + +vuint8m2_t test_vandn_vx_u8m2_tum(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vandn_vx_u8m2_tum(vm, vd, vs2, rs1, vl); +} + +vuint8m4_t test_vandn_vv_u8m4_tum(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) { + return __riscv_vandn_vv_u8m4_tum(vm, vd, vs2, vs1, vl); +} + +vuint8m4_t test_vandn_vx_u8m4_tum(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vandn_vx_u8m4_tum(vm, vd, vs2, rs1, vl); +} + +vuint8m8_t test_vandn_vv_u8m8_tum(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) { + return __riscv_vandn_vv_u8m8_tum(vm, vd, vs2, vs1, vl); +} + +vuint8m8_t test_vandn_vx_u8m8_tum(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vandn_vx_u8m8_tum(vm, vd, vs2, rs1, vl); +} + +vuint16mf4_t test_vandn_vv_u16mf4_tum(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { + return __riscv_vandn_vv_u16mf4_tum(vm, vd, vs2, vs1, vl); +} + +vuint16mf4_t test_vandn_vx_u16mf4_tum(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vandn_vx_u16mf4_tum(vm, vd, vs2, rs1, vl); +} + +vuint16mf2_t test_vandn_vv_u16mf2_tum(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { + return __riscv_vandn_vv_u16mf2_tum(vm, vd, vs2, vs1, vl); +} + +vuint16mf2_t test_vandn_vx_u16mf2_tum(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vandn_vx_u16mf2_tum(vm, vd, vs2, rs1, vl); +} + +vuint16m1_t test_vandn_vv_u16m1_tum(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vandn_vv_u16m1_tum(vm, vd, vs2, vs1, vl); +} + +vuint16m1_t test_vandn_vx_u16m1_tum(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vandn_vx_u16m1_tum(vm, vd, vs2, rs1, vl); +} + +vuint16m2_t test_vandn_vv_u16m2_tum(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { + return __riscv_vandn_vv_u16m2_tum(vm, vd, vs2, vs1, vl); +} + +vuint16m2_t test_vandn_vx_u16m2_tum(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vandn_vx_u16m2_tum(vm, vd, vs2, rs1, vl); +} + +vuint16m4_t test_vandn_vv_u16m4_tum(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { + return __riscv_vandn_vv_u16m4_tum(vm, vd, vs2, vs1, vl); +} + +vuint16m4_t test_vandn_vx_u16m4_tum(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vandn_vx_u16m4_tum(vm, vd, vs2, rs1, vl); +} + +vuint16m8_t test_vandn_vv_u16m8_tum(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) { + return __riscv_vandn_vv_u16m8_tum(vm, vd, vs2, vs1, vl); +} + +vuint16m8_t test_vandn_vx_u16m8_tum(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vandn_vx_u16m8_tum(vm, vd, vs2, rs1, vl); +} + 
+vuint32mf2_t test_vandn_vv_u32mf2_tum(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { + return __riscv_vandn_vv_u32mf2_tum(vm, vd, vs2, vs1, vl); +} + +vuint32mf2_t test_vandn_vx_u32mf2_tum(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vandn_vx_u32mf2_tum(vm, vd, vs2, rs1, vl); +} + +vuint32m1_t test_vandn_vv_u32m1_tum(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vandn_vv_u32m1_tum(vm, vd, vs2, vs1, vl); +} + +vuint32m1_t test_vandn_vx_u32m1_tum(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vandn_vx_u32m1_tum(vm, vd, vs2, rs1, vl); +} + +vuint32m2_t test_vandn_vv_u32m2_tum(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { + return __riscv_vandn_vv_u32m2_tum(vm, vd, vs2, vs1, vl); +} + +vuint32m2_t test_vandn_vx_u32m2_tum(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vandn_vx_u32m2_tum(vm, vd, vs2, rs1, vl); +} + +vuint32m4_t test_vandn_vv_u32m4_tum(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { + return __riscv_vandn_vv_u32m4_tum(vm, vd, vs2, vs1, vl); +} + +vuint32m4_t test_vandn_vx_u32m4_tum(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vandn_vx_u32m4_tum(vm, vd, vs2, rs1, vl); +} + +vuint32m8_t test_vandn_vv_u32m8_tum(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) { + return __riscv_vandn_vv_u32m8_tum(vm, vd, vs2, vs1, vl); +} + +vuint32m8_t test_vandn_vx_u32m8_tum(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vandn_vx_u32m8_tum(vm, vd, vs2, rs1, vl); +} + +vuint64m1_t test_vandn_vv_u64m1_tum(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vandn_vv_u64m1_tum(vm, vd, vs2, vs1, vl); +} + +vuint64m1_t test_vandn_vx_u64m1_tum(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vandn_vx_u64m1_tum(vm, vd, vs2, rs1, vl); +} + +vuint64m2_t test_vandn_vv_u64m2_tum(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { + return __riscv_vandn_vv_u64m2_tum(vm, vd, vs2, vs1, vl); +} + +vuint64m2_t test_vandn_vx_u64m2_tum(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vandn_vx_u64m2_tum(vm, vd, vs2, rs1, vl); +} + +vuint64m4_t test_vandn_vv_u64m4_tum(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { + return __riscv_vandn_vv_u64m4_tum(vm, vd, vs2, vs1, vl); +} + +vuint64m4_t test_vandn_vx_u64m4_tum(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vandn_vx_u64m4_tum(vm, vd, vs2, rs1, vl); +} + +vuint64m8_t test_vandn_vv_u64m8_tum(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { + return __riscv_vandn_vv_u64m8_tum(vm, vd, vs2, vs1, vl); +} + +vuint64m8_t test_vandn_vx_u64m8_tum(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vandn_vx_u64m8_tum(vm, vd, vs2, rs1, vl); +} + +vuint8mf8_t test_vandn_vv_u8mf8_tumu(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { + return __riscv_vandn_vv_u8mf8_tumu(vm, vd, vs2, vs1, vl); +} + +vuint8mf8_t test_vandn_vx_u8mf8_tumu(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vandn_vx_u8mf8_tumu(vm, vd, vs2, rs1, vl); +} + +vuint8mf4_t 
test_vandn_vv_u8mf4_tumu(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { + return __riscv_vandn_vv_u8mf4_tumu(vm, vd, vs2, vs1, vl); +} + +vuint8mf4_t test_vandn_vx_u8mf4_tumu(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vandn_vx_u8mf4_tumu(vm, vd, vs2, rs1, vl); +} + +vuint8mf2_t test_vandn_vv_u8mf2_tumu(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { + return __riscv_vandn_vv_u8mf2_tumu(vm, vd, vs2, vs1, vl); +} + +vuint8mf2_t test_vandn_vx_u8mf2_tumu(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vandn_vx_u8mf2_tumu(vm, vd, vs2, rs1, vl); +} + +vuint8m1_t test_vandn_vv_u8m1_tumu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vandn_vv_u8m1_tumu(vm, vd, vs2, vs1, vl); +} + +vuint8m1_t test_vandn_vx_u8m1_tumu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vandn_vx_u8m1_tumu(vm, vd, vs2, rs1, vl); +} + +vuint8m2_t test_vandn_vv_u8m2_tumu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) { + return __riscv_vandn_vv_u8m2_tumu(vm, vd, vs2, vs1, vl); +} + +vuint8m2_t test_vandn_vx_u8m2_tumu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vandn_vx_u8m2_tumu(vm, vd, vs2, rs1, vl); +} + +vuint8m4_t test_vandn_vv_u8m4_tumu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) { + return __riscv_vandn_vv_u8m4_tumu(vm, vd, vs2, vs1, vl); +} + +vuint8m4_t test_vandn_vx_u8m4_tumu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vandn_vx_u8m4_tumu(vm, vd, vs2, rs1, vl); +} + +vuint8m8_t test_vandn_vv_u8m8_tumu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) { + return __riscv_vandn_vv_u8m8_tumu(vm, vd, vs2, vs1, vl); +} + +vuint8m8_t test_vandn_vx_u8m8_tumu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vandn_vx_u8m8_tumu(vm, vd, vs2, rs1, vl); +} + +vuint16mf4_t test_vandn_vv_u16mf4_tumu(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { + return __riscv_vandn_vv_u16mf4_tumu(vm, vd, vs2, vs1, vl); +} + +vuint16mf4_t test_vandn_vx_u16mf4_tumu(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vandn_vx_u16mf4_tumu(vm, vd, vs2, rs1, vl); +} + +vuint16mf2_t test_vandn_vv_u16mf2_tumu(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { + return __riscv_vandn_vv_u16mf2_tumu(vm, vd, vs2, vs1, vl); +} + +vuint16mf2_t test_vandn_vx_u16mf2_tumu(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vandn_vx_u16mf2_tumu(vm, vd, vs2, rs1, vl); +} + +vuint16m1_t test_vandn_vv_u16m1_tumu(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vandn_vv_u16m1_tumu(vm, vd, vs2, vs1, vl); +} + +vuint16m1_t test_vandn_vx_u16m1_tumu(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vandn_vx_u16m1_tumu(vm, vd, vs2, rs1, vl); +} + +vuint16m2_t test_vandn_vv_u16m2_tumu(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { + return __riscv_vandn_vv_u16m2_tumu(vm, vd, vs2, vs1, vl); +} + +vuint16m2_t test_vandn_vx_u16m2_tumu(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vandn_vx_u16m2_tumu(vm, vd, vs2, rs1, vl); +} + +vuint16m4_t 
test_vandn_vv_u16m4_tumu(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { + return __riscv_vandn_vv_u16m4_tumu(vm, vd, vs2, vs1, vl); +} + +vuint16m4_t test_vandn_vx_u16m4_tumu(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vandn_vx_u16m4_tumu(vm, vd, vs2, rs1, vl); +} + +vuint16m8_t test_vandn_vv_u16m8_tumu(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) { + return __riscv_vandn_vv_u16m8_tumu(vm, vd, vs2, vs1, vl); +} + +vuint16m8_t test_vandn_vx_u16m8_tumu(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vandn_vx_u16m8_tumu(vm, vd, vs2, rs1, vl); +} + +vuint32mf2_t test_vandn_vv_u32mf2_tumu(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { + return __riscv_vandn_vv_u32mf2_tumu(vm, vd, vs2, vs1, vl); +} + +vuint32mf2_t test_vandn_vx_u32mf2_tumu(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vandn_vx_u32mf2_tumu(vm, vd, vs2, rs1, vl); +} + +vuint32m1_t test_vandn_vv_u32m1_tumu(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vandn_vv_u32m1_tumu(vm, vd, vs2, vs1, vl); +} + +vuint32m1_t test_vandn_vx_u32m1_tumu(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vandn_vx_u32m1_tumu(vm, vd, vs2, rs1, vl); +} + +vuint32m2_t test_vandn_vv_u32m2_tumu(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { + return __riscv_vandn_vv_u32m2_tumu(vm, vd, vs2, vs1, vl); +} + +vuint32m2_t test_vandn_vx_u32m2_tumu(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vandn_vx_u32m2_tumu(vm, vd, vs2, rs1, vl); +} + +vuint32m4_t test_vandn_vv_u32m4_tumu(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { + return __riscv_vandn_vv_u32m4_tumu(vm, vd, vs2, vs1, vl); +} + +vuint32m4_t test_vandn_vx_u32m4_tumu(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vandn_vx_u32m4_tumu(vm, vd, vs2, rs1, vl); +} + +vuint32m8_t test_vandn_vv_u32m8_tumu(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) { + return __riscv_vandn_vv_u32m8_tumu(vm, vd, vs2, vs1, vl); +} + +vuint32m8_t test_vandn_vx_u32m8_tumu(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vandn_vx_u32m8_tumu(vm, vd, vs2, rs1, vl); +} + +vuint64m1_t test_vandn_vv_u64m1_tumu(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vandn_vv_u64m1_tumu(vm, vd, vs2, vs1, vl); +} + +vuint64m1_t test_vandn_vx_u64m1_tumu(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vandn_vx_u64m1_tumu(vm, vd, vs2, rs1, vl); +} + +vuint64m2_t test_vandn_vv_u64m2_tumu(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { + return __riscv_vandn_vv_u64m2_tumu(vm, vd, vs2, vs1, vl); +} + +vuint64m2_t test_vandn_vx_u64m2_tumu(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vandn_vx_u64m2_tumu(vm, vd, vs2, rs1, vl); +} + +vuint64m4_t test_vandn_vv_u64m4_tumu(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { + return __riscv_vandn_vv_u64m4_tumu(vm, vd, vs2, vs1, vl); +} + +vuint64m4_t test_vandn_vx_u64m4_tumu(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vandn_vx_u64m4_tumu(vm, vd, vs2, rs1, vl); 
+} + +vuint64m8_t test_vandn_vv_u64m8_tumu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { + return __riscv_vandn_vv_u64m8_tumu(vm, vd, vs2, vs1, vl); +} + +vuint64m8_t test_vandn_vx_u64m8_tumu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vandn_vx_u64m8_tumu(vm, vd, vs2, rs1, vl); +} + +vuint8mf8_t test_vandn_vv_u8mf8_mu(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { + return __riscv_vandn_vv_u8mf8_mu(vm, vd, vs2, vs1, vl); +} + +vuint8mf8_t test_vandn_vx_u8mf8_mu(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vandn_vx_u8mf8_mu(vm, vd, vs2, rs1, vl); +} + +vuint8mf4_t test_vandn_vv_u8mf4_mu(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { + return __riscv_vandn_vv_u8mf4_mu(vm, vd, vs2, vs1, vl); +} + +vuint8mf4_t test_vandn_vx_u8mf4_mu(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vandn_vx_u8mf4_mu(vm, vd, vs2, rs1, vl); +} + +vuint8mf2_t test_vandn_vv_u8mf2_mu(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { + return __riscv_vandn_vv_u8mf2_mu(vm, vd, vs2, vs1, vl); +} + +vuint8mf2_t test_vandn_vx_u8mf2_mu(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vandn_vx_u8mf2_mu(vm, vd, vs2, rs1, vl); +} + +vuint8m1_t test_vandn_vv_u8m1_mu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vandn_vv_u8m1_mu(vm, vd, vs2, vs1, vl); +} + +vuint8m1_t test_vandn_vx_u8m1_mu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vandn_vx_u8m1_mu(vm, vd, vs2, rs1, vl); +} + +vuint8m2_t test_vandn_vv_u8m2_mu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) { + return __riscv_vandn_vv_u8m2_mu(vm, vd, vs2, vs1, vl); +} + +vuint8m2_t test_vandn_vx_u8m2_mu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vandn_vx_u8m2_mu(vm, vd, vs2, rs1, vl); +} + +vuint8m4_t test_vandn_vv_u8m4_mu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) { + return __riscv_vandn_vv_u8m4_mu(vm, vd, vs2, vs1, vl); +} + +vuint8m4_t test_vandn_vx_u8m4_mu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vandn_vx_u8m4_mu(vm, vd, vs2, rs1, vl); +} + +vuint8m8_t test_vandn_vv_u8m8_mu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) { + return __riscv_vandn_vv_u8m8_mu(vm, vd, vs2, vs1, vl); +} + +vuint8m8_t test_vandn_vx_u8m8_mu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vandn_vx_u8m8_mu(vm, vd, vs2, rs1, vl); +} + +vuint16mf4_t test_vandn_vv_u16mf4_mu(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { + return __riscv_vandn_vv_u16mf4_mu(vm, vd, vs2, vs1, vl); +} + +vuint16mf4_t test_vandn_vx_u16mf4_mu(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vandn_vx_u16mf4_mu(vm, vd, vs2, rs1, vl); +} + +vuint16mf2_t test_vandn_vv_u16mf2_mu(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { + return __riscv_vandn_vv_u16mf2_mu(vm, vd, vs2, vs1, vl); +} + +vuint16mf2_t test_vandn_vx_u16mf2_mu(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vandn_vx_u16mf2_mu(vm, vd, vs2, rs1, vl); +} + +vuint16m1_t test_vandn_vv_u16m1_mu(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, 
vuint16m1_t vs1, size_t vl) { + return __riscv_vandn_vv_u16m1_mu(vm, vd, vs2, vs1, vl); +} + +vuint16m1_t test_vandn_vx_u16m1_mu(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vandn_vx_u16m1_mu(vm, vd, vs2, rs1, vl); +} + +vuint16m2_t test_vandn_vv_u16m2_mu(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { + return __riscv_vandn_vv_u16m2_mu(vm, vd, vs2, vs1, vl); +} + +vuint16m2_t test_vandn_vx_u16m2_mu(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vandn_vx_u16m2_mu(vm, vd, vs2, rs1, vl); +} + +vuint16m4_t test_vandn_vv_u16m4_mu(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { + return __riscv_vandn_vv_u16m4_mu(vm, vd, vs2, vs1, vl); +} + +vuint16m4_t test_vandn_vx_u16m4_mu(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vandn_vx_u16m4_mu(vm, vd, vs2, rs1, vl); +} + +vuint16m8_t test_vandn_vv_u16m8_mu(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) { + return __riscv_vandn_vv_u16m8_mu(vm, vd, vs2, vs1, vl); +} + +vuint16m8_t test_vandn_vx_u16m8_mu(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vandn_vx_u16m8_mu(vm, vd, vs2, rs1, vl); +} + +vuint32mf2_t test_vandn_vv_u32mf2_mu(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { + return __riscv_vandn_vv_u32mf2_mu(vm, vd, vs2, vs1, vl); +} + +vuint32mf2_t test_vandn_vx_u32mf2_mu(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vandn_vx_u32mf2_mu(vm, vd, vs2, rs1, vl); +} + +vuint32m1_t test_vandn_vv_u32m1_mu(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vandn_vv_u32m1_mu(vm, vd, vs2, vs1, vl); +} + +vuint32m1_t test_vandn_vx_u32m1_mu(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vandn_vx_u32m1_mu(vm, vd, vs2, rs1, vl); +} + +vuint32m2_t test_vandn_vv_u32m2_mu(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { + return __riscv_vandn_vv_u32m2_mu(vm, vd, vs2, vs1, vl); +} + +vuint32m2_t test_vandn_vx_u32m2_mu(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vandn_vx_u32m2_mu(vm, vd, vs2, rs1, vl); +} + +vuint32m4_t test_vandn_vv_u32m4_mu(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { + return __riscv_vandn_vv_u32m4_mu(vm, vd, vs2, vs1, vl); +} + +vuint32m4_t test_vandn_vx_u32m4_mu(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vandn_vx_u32m4_mu(vm, vd, vs2, rs1, vl); +} + +vuint32m8_t test_vandn_vv_u32m8_mu(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) { + return __riscv_vandn_vv_u32m8_mu(vm, vd, vs2, vs1, vl); +} + +vuint32m8_t test_vandn_vx_u32m8_mu(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vandn_vx_u32m8_mu(vm, vd, vs2, rs1, vl); +} + +vuint64m1_t test_vandn_vv_u64m1_mu(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vandn_vv_u64m1_mu(vm, vd, vs2, vs1, vl); +} + +vuint64m1_t test_vandn_vx_u64m1_mu(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vandn_vx_u64m1_mu(vm, vd, vs2, rs1, vl); +} + +vuint64m2_t test_vandn_vv_u64m2_mu(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { + return 
__riscv_vandn_vv_u64m2_mu(vm, vd, vs2, vs1, vl); +} + +vuint64m2_t test_vandn_vx_u64m2_mu(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vandn_vx_u64m2_mu(vm, vd, vs2, rs1, vl); +} + +vuint64m4_t test_vandn_vv_u64m4_mu(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { + return __riscv_vandn_vv_u64m4_mu(vm, vd, vs2, vs1, vl); +} + +vuint64m4_t test_vandn_vx_u64m4_mu(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vandn_vx_u64m4_mu(vm, vd, vs2, rs1, vl); +} + +vuint64m8_t test_vandn_vv_u64m8_mu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { + return __riscv_vandn_vv_u64m8_mu(vm, vd, vs2, vs1, vl); +} + +vuint64m8_t test_vandn_vx_u64m8_mu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vandn_vx_u64m8_mu(vm, vd, vs2, rs1, vl); +} diff --git a/auto-generated/vector-crypto/policy_funcs/api-testing/vbrev.c b/auto-generated/vector-crypto/policy_funcs/api-testing/vbrev.c new file mode 100644 index 000000000..5a16e6adf --- /dev/null +++ b/auto-generated/vector-crypto/policy_funcs/api-testing/vbrev.c @@ -0,0 +1,354 @@ +#include +#include + +vuint8mf8_t test_vbrev_v_u8mf8_tu(vuint8mf8_t vd, vuint8mf8_t vs2, size_t vl) { + return __riscv_vbrev_v_u8mf8_tu(vd, vs2, vl); +} + +vuint8mf4_t test_vbrev_v_u8mf4_tu(vuint8mf4_t vd, vuint8mf4_t vs2, size_t vl) { + return __riscv_vbrev_v_u8mf4_tu(vd, vs2, vl); +} + +vuint8mf2_t test_vbrev_v_u8mf2_tu(vuint8mf2_t vd, vuint8mf2_t vs2, size_t vl) { + return __riscv_vbrev_v_u8mf2_tu(vd, vs2, vl); +} + +vuint8m1_t test_vbrev_v_u8m1_tu(vuint8m1_t vd, vuint8m1_t vs2, size_t vl) { + return __riscv_vbrev_v_u8m1_tu(vd, vs2, vl); +} + +vuint8m2_t test_vbrev_v_u8m2_tu(vuint8m2_t vd, vuint8m2_t vs2, size_t vl) { + return __riscv_vbrev_v_u8m2_tu(vd, vs2, vl); +} + +vuint8m4_t test_vbrev_v_u8m4_tu(vuint8m4_t vd, vuint8m4_t vs2, size_t vl) { + return __riscv_vbrev_v_u8m4_tu(vd, vs2, vl); +} + +vuint8m8_t test_vbrev_v_u8m8_tu(vuint8m8_t vd, vuint8m8_t vs2, size_t vl) { + return __riscv_vbrev_v_u8m8_tu(vd, vs2, vl); +} + +vuint16mf4_t test_vbrev_v_u16mf4_tu(vuint16mf4_t vd, vuint16mf4_t vs2, size_t vl) { + return __riscv_vbrev_v_u16mf4_tu(vd, vs2, vl); +} + +vuint16mf2_t test_vbrev_v_u16mf2_tu(vuint16mf2_t vd, vuint16mf2_t vs2, size_t vl) { + return __riscv_vbrev_v_u16mf2_tu(vd, vs2, vl); +} + +vuint16m1_t test_vbrev_v_u16m1_tu(vuint16m1_t vd, vuint16m1_t vs2, size_t vl) { + return __riscv_vbrev_v_u16m1_tu(vd, vs2, vl); +} + +vuint16m2_t test_vbrev_v_u16m2_tu(vuint16m2_t vd, vuint16m2_t vs2, size_t vl) { + return __riscv_vbrev_v_u16m2_tu(vd, vs2, vl); +} + +vuint16m4_t test_vbrev_v_u16m4_tu(vuint16m4_t vd, vuint16m4_t vs2, size_t vl) { + return __riscv_vbrev_v_u16m4_tu(vd, vs2, vl); +} + +vuint16m8_t test_vbrev_v_u16m8_tu(vuint16m8_t vd, vuint16m8_t vs2, size_t vl) { + return __riscv_vbrev_v_u16m8_tu(vd, vs2, vl); +} + +vuint32mf2_t test_vbrev_v_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) { + return __riscv_vbrev_v_u32mf2_tu(vd, vs2, vl); +} + +vuint32m1_t test_vbrev_v_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vbrev_v_u32m1_tu(vd, vs2, vl); +} + +vuint32m2_t test_vbrev_v_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vbrev_v_u32m2_tu(vd, vs2, vl); +} + +vuint32m4_t test_vbrev_v_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vbrev_v_u32m4_tu(vd, vs2, vl); +} + +vuint32m8_t test_vbrev_v_u32m8_tu(vuint32m8_t vd, 
vuint32m8_t vs2, size_t vl) { + return __riscv_vbrev_v_u32m8_tu(vd, vs2, vl); +} + +vuint64m1_t test_vbrev_v_u64m1_tu(vuint64m1_t vd, vuint64m1_t vs2, size_t vl) { + return __riscv_vbrev_v_u64m1_tu(vd, vs2, vl); +} + +vuint64m2_t test_vbrev_v_u64m2_tu(vuint64m2_t vd, vuint64m2_t vs2, size_t vl) { + return __riscv_vbrev_v_u64m2_tu(vd, vs2, vl); +} + +vuint64m4_t test_vbrev_v_u64m4_tu(vuint64m4_t vd, vuint64m4_t vs2, size_t vl) { + return __riscv_vbrev_v_u64m4_tu(vd, vs2, vl); +} + +vuint64m8_t test_vbrev_v_u64m8_tu(vuint64m8_t vd, vuint64m8_t vs2, size_t vl) { + return __riscv_vbrev_v_u64m8_tu(vd, vs2, vl); +} + +vuint8mf8_t test_vbrev_v_u8mf8_tum(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, size_t vl) { + return __riscv_vbrev_v_u8mf8_tum(vm, vd, vs2, vl); +} + +vuint8mf4_t test_vbrev_v_u8mf4_tum(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, size_t vl) { + return __riscv_vbrev_v_u8mf4_tum(vm, vd, vs2, vl); +} + +vuint8mf2_t test_vbrev_v_u8mf2_tum(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, size_t vl) { + return __riscv_vbrev_v_u8mf2_tum(vm, vd, vs2, vl); +} + +vuint8m1_t test_vbrev_v_u8m1_tum(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, size_t vl) { + return __riscv_vbrev_v_u8m1_tum(vm, vd, vs2, vl); +} + +vuint8m2_t test_vbrev_v_u8m2_tum(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, size_t vl) { + return __riscv_vbrev_v_u8m2_tum(vm, vd, vs2, vl); +} + +vuint8m4_t test_vbrev_v_u8m4_tum(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, size_t vl) { + return __riscv_vbrev_v_u8m4_tum(vm, vd, vs2, vl); +} + +vuint8m8_t test_vbrev_v_u8m8_tum(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, size_t vl) { + return __riscv_vbrev_v_u8m8_tum(vm, vd, vs2, vl); +} + +vuint16mf4_t test_vbrev_v_u16mf4_tum(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, size_t vl) { + return __riscv_vbrev_v_u16mf4_tum(vm, vd, vs2, vl); +} + +vuint16mf2_t test_vbrev_v_u16mf2_tum(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, size_t vl) { + return __riscv_vbrev_v_u16mf2_tum(vm, vd, vs2, vl); +} + +vuint16m1_t test_vbrev_v_u16m1_tum(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, size_t vl) { + return __riscv_vbrev_v_u16m1_tum(vm, vd, vs2, vl); +} + +vuint16m2_t test_vbrev_v_u16m2_tum(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, size_t vl) { + return __riscv_vbrev_v_u16m2_tum(vm, vd, vs2, vl); +} + +vuint16m4_t test_vbrev_v_u16m4_tum(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, size_t vl) { + return __riscv_vbrev_v_u16m4_tum(vm, vd, vs2, vl); +} + +vuint16m8_t test_vbrev_v_u16m8_tum(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, size_t vl) { + return __riscv_vbrev_v_u16m8_tum(vm, vd, vs2, vl); +} + +vuint32mf2_t test_vbrev_v_u32mf2_tum(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) { + return __riscv_vbrev_v_u32mf2_tum(vm, vd, vs2, vl); +} + +vuint32m1_t test_vbrev_v_u32m1_tum(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vbrev_v_u32m1_tum(vm, vd, vs2, vl); +} + +vuint32m2_t test_vbrev_v_u32m2_tum(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vbrev_v_u32m2_tum(vm, vd, vs2, vl); +} + +vuint32m4_t test_vbrev_v_u32m4_tum(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vbrev_v_u32m4_tum(vm, vd, vs2, vl); +} + +vuint32m8_t test_vbrev_v_u32m8_tum(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { + return __riscv_vbrev_v_u32m8_tum(vm, vd, vs2, vl); +} + +vuint64m1_t test_vbrev_v_u64m1_tum(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, size_t vl) { + return __riscv_vbrev_v_u64m1_tum(vm, vd, vs2, vl); +} + +vuint64m2_t 
test_vbrev_v_u64m2_tum(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, size_t vl) { + return __riscv_vbrev_v_u64m2_tum(vm, vd, vs2, vl); +} + +vuint64m4_t test_vbrev_v_u64m4_tum(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, size_t vl) { + return __riscv_vbrev_v_u64m4_tum(vm, vd, vs2, vl); +} + +vuint64m8_t test_vbrev_v_u64m8_tum(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, size_t vl) { + return __riscv_vbrev_v_u64m8_tum(vm, vd, vs2, vl); +} + +vuint8mf8_t test_vbrev_v_u8mf8_tumu(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, size_t vl) { + return __riscv_vbrev_v_u8mf8_tumu(vm, vd, vs2, vl); +} + +vuint8mf4_t test_vbrev_v_u8mf4_tumu(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, size_t vl) { + return __riscv_vbrev_v_u8mf4_tumu(vm, vd, vs2, vl); +} + +vuint8mf2_t test_vbrev_v_u8mf2_tumu(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, size_t vl) { + return __riscv_vbrev_v_u8mf2_tumu(vm, vd, vs2, vl); +} + +vuint8m1_t test_vbrev_v_u8m1_tumu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, size_t vl) { + return __riscv_vbrev_v_u8m1_tumu(vm, vd, vs2, vl); +} + +vuint8m2_t test_vbrev_v_u8m2_tumu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, size_t vl) { + return __riscv_vbrev_v_u8m2_tumu(vm, vd, vs2, vl); +} + +vuint8m4_t test_vbrev_v_u8m4_tumu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, size_t vl) { + return __riscv_vbrev_v_u8m4_tumu(vm, vd, vs2, vl); +} + +vuint8m8_t test_vbrev_v_u8m8_tumu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, size_t vl) { + return __riscv_vbrev_v_u8m8_tumu(vm, vd, vs2, vl); +} + +vuint16mf4_t test_vbrev_v_u16mf4_tumu(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, size_t vl) { + return __riscv_vbrev_v_u16mf4_tumu(vm, vd, vs2, vl); +} + +vuint16mf2_t test_vbrev_v_u16mf2_tumu(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, size_t vl) { + return __riscv_vbrev_v_u16mf2_tumu(vm, vd, vs2, vl); +} + +vuint16m1_t test_vbrev_v_u16m1_tumu(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, size_t vl) { + return __riscv_vbrev_v_u16m1_tumu(vm, vd, vs2, vl); +} + +vuint16m2_t test_vbrev_v_u16m2_tumu(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, size_t vl) { + return __riscv_vbrev_v_u16m2_tumu(vm, vd, vs2, vl); +} + +vuint16m4_t test_vbrev_v_u16m4_tumu(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, size_t vl) { + return __riscv_vbrev_v_u16m4_tumu(vm, vd, vs2, vl); +} + +vuint16m8_t test_vbrev_v_u16m8_tumu(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, size_t vl) { + return __riscv_vbrev_v_u16m8_tumu(vm, vd, vs2, vl); +} + +vuint32mf2_t test_vbrev_v_u32mf2_tumu(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) { + return __riscv_vbrev_v_u32mf2_tumu(vm, vd, vs2, vl); +} + +vuint32m1_t test_vbrev_v_u32m1_tumu(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vbrev_v_u32m1_tumu(vm, vd, vs2, vl); +} + +vuint32m2_t test_vbrev_v_u32m2_tumu(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vbrev_v_u32m2_tumu(vm, vd, vs2, vl); +} + +vuint32m4_t test_vbrev_v_u32m4_tumu(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vbrev_v_u32m4_tumu(vm, vd, vs2, vl); +} + +vuint32m8_t test_vbrev_v_u32m8_tumu(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { + return __riscv_vbrev_v_u32m8_tumu(vm, vd, vs2, vl); +} + +vuint64m1_t test_vbrev_v_u64m1_tumu(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, size_t vl) { + return __riscv_vbrev_v_u64m1_tumu(vm, vd, vs2, vl); +} + +vuint64m2_t test_vbrev_v_u64m2_tumu(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, size_t vl) { + return __riscv_vbrev_v_u64m2_tumu(vm, vd, vs2, vl); +} + 
+vuint64m4_t test_vbrev_v_u64m4_tumu(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, size_t vl) { + return __riscv_vbrev_v_u64m4_tumu(vm, vd, vs2, vl); +} + +vuint64m8_t test_vbrev_v_u64m8_tumu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, size_t vl) { + return __riscv_vbrev_v_u64m8_tumu(vm, vd, vs2, vl); +} + +vuint8mf8_t test_vbrev_v_u8mf8_mu(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, size_t vl) { + return __riscv_vbrev_v_u8mf8_mu(vm, vd, vs2, vl); +} + +vuint8mf4_t test_vbrev_v_u8mf4_mu(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, size_t vl) { + return __riscv_vbrev_v_u8mf4_mu(vm, vd, vs2, vl); +} + +vuint8mf2_t test_vbrev_v_u8mf2_mu(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, size_t vl) { + return __riscv_vbrev_v_u8mf2_mu(vm, vd, vs2, vl); +} + +vuint8m1_t test_vbrev_v_u8m1_mu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, size_t vl) { + return __riscv_vbrev_v_u8m1_mu(vm, vd, vs2, vl); +} + +vuint8m2_t test_vbrev_v_u8m2_mu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, size_t vl) { + return __riscv_vbrev_v_u8m2_mu(vm, vd, vs2, vl); +} + +vuint8m4_t test_vbrev_v_u8m4_mu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, size_t vl) { + return __riscv_vbrev_v_u8m4_mu(vm, vd, vs2, vl); +} + +vuint8m8_t test_vbrev_v_u8m8_mu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, size_t vl) { + return __riscv_vbrev_v_u8m8_mu(vm, vd, vs2, vl); +} + +vuint16mf4_t test_vbrev_v_u16mf4_mu(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, size_t vl) { + return __riscv_vbrev_v_u16mf4_mu(vm, vd, vs2, vl); +} + +vuint16mf2_t test_vbrev_v_u16mf2_mu(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, size_t vl) { + return __riscv_vbrev_v_u16mf2_mu(vm, vd, vs2, vl); +} + +vuint16m1_t test_vbrev_v_u16m1_mu(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, size_t vl) { + return __riscv_vbrev_v_u16m1_mu(vm, vd, vs2, vl); +} + +vuint16m2_t test_vbrev_v_u16m2_mu(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, size_t vl) { + return __riscv_vbrev_v_u16m2_mu(vm, vd, vs2, vl); +} + +vuint16m4_t test_vbrev_v_u16m4_mu(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, size_t vl) { + return __riscv_vbrev_v_u16m4_mu(vm, vd, vs2, vl); +} + +vuint16m8_t test_vbrev_v_u16m8_mu(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, size_t vl) { + return __riscv_vbrev_v_u16m8_mu(vm, vd, vs2, vl); +} + +vuint32mf2_t test_vbrev_v_u32mf2_mu(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) { + return __riscv_vbrev_v_u32mf2_mu(vm, vd, vs2, vl); +} + +vuint32m1_t test_vbrev_v_u32m1_mu(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vbrev_v_u32m1_mu(vm, vd, vs2, vl); +} + +vuint32m2_t test_vbrev_v_u32m2_mu(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vbrev_v_u32m2_mu(vm, vd, vs2, vl); +} + +vuint32m4_t test_vbrev_v_u32m4_mu(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vbrev_v_u32m4_mu(vm, vd, vs2, vl); +} + +vuint32m8_t test_vbrev_v_u32m8_mu(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { + return __riscv_vbrev_v_u32m8_mu(vm, vd, vs2, vl); +} + +vuint64m1_t test_vbrev_v_u64m1_mu(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, size_t vl) { + return __riscv_vbrev_v_u64m1_mu(vm, vd, vs2, vl); +} + +vuint64m2_t test_vbrev_v_u64m2_mu(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, size_t vl) { + return __riscv_vbrev_v_u64m2_mu(vm, vd, vs2, vl); +} + +vuint64m4_t test_vbrev_v_u64m4_mu(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, size_t vl) { + return __riscv_vbrev_v_u64m4_mu(vm, vd, vs2, vl); +} + +vuint64m8_t test_vbrev_v_u64m8_mu(vbool8_t vm, vuint64m8_t vd, 
vuint64m8_t vs2, size_t vl) {
+  return __riscv_vbrev_v_u64m8_mu(vm, vd, vs2, vl);
+}
diff --git a/auto-generated/vector-crypto/policy_funcs/api-testing/vbrev8.c b/auto-generated/vector-crypto/policy_funcs/api-testing/vbrev8.c
new file mode 100644
index 000000000..6186201fc
--- /dev/null
+++ b/auto-generated/vector-crypto/policy_funcs/api-testing/vbrev8.c
@@ -0,0 +1,354 @@
+#include <stdint.h>
+#include <riscv_vector.h>
+
+vuint8mf8_t test_vbrev8_v_u8mf8_tu(vuint8mf8_t vd, vuint8mf8_t vs2, size_t vl) {
+  return __riscv_vbrev8_v_u8mf8_tu(vd, vs2, vl);
+}
+
+vuint8mf4_t test_vbrev8_v_u8mf4_tu(vuint8mf4_t vd, vuint8mf4_t vs2, size_t vl) {
+  return __riscv_vbrev8_v_u8mf4_tu(vd, vs2, vl);
+}
+
+vuint8mf2_t test_vbrev8_v_u8mf2_tu(vuint8mf2_t vd, vuint8mf2_t vs2, size_t vl) {
+  return __riscv_vbrev8_v_u8mf2_tu(vd, vs2, vl);
+}
+
+vuint8m1_t test_vbrev8_v_u8m1_tu(vuint8m1_t vd, vuint8m1_t vs2, size_t vl) {
+  return __riscv_vbrev8_v_u8m1_tu(vd, vs2, vl);
+}
+
+vuint8m2_t test_vbrev8_v_u8m2_tu(vuint8m2_t vd, vuint8m2_t vs2, size_t vl) {
+  return __riscv_vbrev8_v_u8m2_tu(vd, vs2, vl);
+}
+
+vuint8m4_t test_vbrev8_v_u8m4_tu(vuint8m4_t vd, vuint8m4_t vs2, size_t vl) {
+  return __riscv_vbrev8_v_u8m4_tu(vd, vs2, vl);
+}
+
+vuint8m8_t test_vbrev8_v_u8m8_tu(vuint8m8_t vd, vuint8m8_t vs2, size_t vl) {
+  return __riscv_vbrev8_v_u8m8_tu(vd, vs2, vl);
+}
+
+vuint16mf4_t test_vbrev8_v_u16mf4_tu(vuint16mf4_t vd, vuint16mf4_t vs2, size_t vl) {
+  return __riscv_vbrev8_v_u16mf4_tu(vd, vs2, vl);
+}
+
+vuint16mf2_t test_vbrev8_v_u16mf2_tu(vuint16mf2_t vd, vuint16mf2_t vs2, size_t vl) {
+  return __riscv_vbrev8_v_u16mf2_tu(vd, vs2, vl);
+}
+
+vuint16m1_t test_vbrev8_v_u16m1_tu(vuint16m1_t vd, vuint16m1_t vs2, size_t vl) {
+  return __riscv_vbrev8_v_u16m1_tu(vd, vs2, vl);
+}
+
+vuint16m2_t test_vbrev8_v_u16m2_tu(vuint16m2_t vd, vuint16m2_t vs2, size_t vl) {
+  return __riscv_vbrev8_v_u16m2_tu(vd, vs2, vl);
+}
+
+vuint16m4_t test_vbrev8_v_u16m4_tu(vuint16m4_t vd, vuint16m4_t vs2, size_t vl) {
+  return __riscv_vbrev8_v_u16m4_tu(vd, vs2, vl);
+}
+
+vuint16m8_t test_vbrev8_v_u16m8_tu(vuint16m8_t vd, vuint16m8_t vs2, size_t vl) {
+  return __riscv_vbrev8_v_u16m8_tu(vd, vs2, vl);
+}
+
+vuint32mf2_t test_vbrev8_v_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) {
+  return __riscv_vbrev8_v_u32mf2_tu(vd, vs2, vl);
+}
+
+vuint32m1_t test_vbrev8_v_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) {
+  return __riscv_vbrev8_v_u32m1_tu(vd, vs2, vl);
+}
+
+vuint32m2_t test_vbrev8_v_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) {
+  return __riscv_vbrev8_v_u32m2_tu(vd, vs2, vl);
+}
+
+vuint32m4_t test_vbrev8_v_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) {
+  return __riscv_vbrev8_v_u32m4_tu(vd, vs2, vl);
+}
+
+vuint32m8_t test_vbrev8_v_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) {
+  return __riscv_vbrev8_v_u32m8_tu(vd, vs2, vl);
+}
+
+vuint64m1_t test_vbrev8_v_u64m1_tu(vuint64m1_t vd, vuint64m1_t vs2, size_t vl) {
+  return __riscv_vbrev8_v_u64m1_tu(vd, vs2, vl);
+}
+
+vuint64m2_t test_vbrev8_v_u64m2_tu(vuint64m2_t vd, vuint64m2_t vs2, size_t vl) {
+  return __riscv_vbrev8_v_u64m2_tu(vd, vs2, vl);
+}
+
+vuint64m4_t test_vbrev8_v_u64m4_tu(vuint64m4_t vd, vuint64m4_t vs2, size_t vl) {
+  return __riscv_vbrev8_v_u64m4_tu(vd, vs2, vl);
+}
+
+vuint64m8_t test_vbrev8_v_u64m8_tu(vuint64m8_t vd, vuint64m8_t vs2, size_t vl) {
+  return __riscv_vbrev8_v_u64m8_tu(vd, vs2, vl);
+}
+
+vuint8mf8_t test_vbrev8_v_u8mf8_tum(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, size_t vl) {
+  return __riscv_vbrev8_v_u8mf8_tum(vm, vd, vs2, vl);
+}
+
+vuint8mf4_t
test_vbrev8_v_u8mf4_tum(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, size_t vl) { + return __riscv_vbrev8_v_u8mf4_tum(vm, vd, vs2, vl); +} + +vuint8mf2_t test_vbrev8_v_u8mf2_tum(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, size_t vl) { + return __riscv_vbrev8_v_u8mf2_tum(vm, vd, vs2, vl); +} + +vuint8m1_t test_vbrev8_v_u8m1_tum(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, size_t vl) { + return __riscv_vbrev8_v_u8m1_tum(vm, vd, vs2, vl); +} + +vuint8m2_t test_vbrev8_v_u8m2_tum(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, size_t vl) { + return __riscv_vbrev8_v_u8m2_tum(vm, vd, vs2, vl); +} + +vuint8m4_t test_vbrev8_v_u8m4_tum(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, size_t vl) { + return __riscv_vbrev8_v_u8m4_tum(vm, vd, vs2, vl); +} + +vuint8m8_t test_vbrev8_v_u8m8_tum(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, size_t vl) { + return __riscv_vbrev8_v_u8m8_tum(vm, vd, vs2, vl); +} + +vuint16mf4_t test_vbrev8_v_u16mf4_tum(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, size_t vl) { + return __riscv_vbrev8_v_u16mf4_tum(vm, vd, vs2, vl); +} + +vuint16mf2_t test_vbrev8_v_u16mf2_tum(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, size_t vl) { + return __riscv_vbrev8_v_u16mf2_tum(vm, vd, vs2, vl); +} + +vuint16m1_t test_vbrev8_v_u16m1_tum(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, size_t vl) { + return __riscv_vbrev8_v_u16m1_tum(vm, vd, vs2, vl); +} + +vuint16m2_t test_vbrev8_v_u16m2_tum(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, size_t vl) { + return __riscv_vbrev8_v_u16m2_tum(vm, vd, vs2, vl); +} + +vuint16m4_t test_vbrev8_v_u16m4_tum(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, size_t vl) { + return __riscv_vbrev8_v_u16m4_tum(vm, vd, vs2, vl); +} + +vuint16m8_t test_vbrev8_v_u16m8_tum(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, size_t vl) { + return __riscv_vbrev8_v_u16m8_tum(vm, vd, vs2, vl); +} + +vuint32mf2_t test_vbrev8_v_u32mf2_tum(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) { + return __riscv_vbrev8_v_u32mf2_tum(vm, vd, vs2, vl); +} + +vuint32m1_t test_vbrev8_v_u32m1_tum(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vbrev8_v_u32m1_tum(vm, vd, vs2, vl); +} + +vuint32m2_t test_vbrev8_v_u32m2_tum(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vbrev8_v_u32m2_tum(vm, vd, vs2, vl); +} + +vuint32m4_t test_vbrev8_v_u32m4_tum(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vbrev8_v_u32m4_tum(vm, vd, vs2, vl); +} + +vuint32m8_t test_vbrev8_v_u32m8_tum(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { + return __riscv_vbrev8_v_u32m8_tum(vm, vd, vs2, vl); +} + +vuint64m1_t test_vbrev8_v_u64m1_tum(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, size_t vl) { + return __riscv_vbrev8_v_u64m1_tum(vm, vd, vs2, vl); +} + +vuint64m2_t test_vbrev8_v_u64m2_tum(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, size_t vl) { + return __riscv_vbrev8_v_u64m2_tum(vm, vd, vs2, vl); +} + +vuint64m4_t test_vbrev8_v_u64m4_tum(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, size_t vl) { + return __riscv_vbrev8_v_u64m4_tum(vm, vd, vs2, vl); +} + +vuint64m8_t test_vbrev8_v_u64m8_tum(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, size_t vl) { + return __riscv_vbrev8_v_u64m8_tum(vm, vd, vs2, vl); +} + +vuint8mf8_t test_vbrev8_v_u8mf8_tumu(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, size_t vl) { + return __riscv_vbrev8_v_u8mf8_tumu(vm, vd, vs2, vl); +} + +vuint8mf4_t test_vbrev8_v_u8mf4_tumu(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, size_t vl) { + return __riscv_vbrev8_v_u8mf4_tumu(vm, vd, vs2, 
vl); +} + +vuint8mf2_t test_vbrev8_v_u8mf2_tumu(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, size_t vl) { + return __riscv_vbrev8_v_u8mf2_tumu(vm, vd, vs2, vl); +} + +vuint8m1_t test_vbrev8_v_u8m1_tumu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, size_t vl) { + return __riscv_vbrev8_v_u8m1_tumu(vm, vd, vs2, vl); +} + +vuint8m2_t test_vbrev8_v_u8m2_tumu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, size_t vl) { + return __riscv_vbrev8_v_u8m2_tumu(vm, vd, vs2, vl); +} + +vuint8m4_t test_vbrev8_v_u8m4_tumu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, size_t vl) { + return __riscv_vbrev8_v_u8m4_tumu(vm, vd, vs2, vl); +} + +vuint8m8_t test_vbrev8_v_u8m8_tumu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, size_t vl) { + return __riscv_vbrev8_v_u8m8_tumu(vm, vd, vs2, vl); +} + +vuint16mf4_t test_vbrev8_v_u16mf4_tumu(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, size_t vl) { + return __riscv_vbrev8_v_u16mf4_tumu(vm, vd, vs2, vl); +} + +vuint16mf2_t test_vbrev8_v_u16mf2_tumu(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, size_t vl) { + return __riscv_vbrev8_v_u16mf2_tumu(vm, vd, vs2, vl); +} + +vuint16m1_t test_vbrev8_v_u16m1_tumu(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, size_t vl) { + return __riscv_vbrev8_v_u16m1_tumu(vm, vd, vs2, vl); +} + +vuint16m2_t test_vbrev8_v_u16m2_tumu(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, size_t vl) { + return __riscv_vbrev8_v_u16m2_tumu(vm, vd, vs2, vl); +} + +vuint16m4_t test_vbrev8_v_u16m4_tumu(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, size_t vl) { + return __riscv_vbrev8_v_u16m4_tumu(vm, vd, vs2, vl); +} + +vuint16m8_t test_vbrev8_v_u16m8_tumu(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, size_t vl) { + return __riscv_vbrev8_v_u16m8_tumu(vm, vd, vs2, vl); +} + +vuint32mf2_t test_vbrev8_v_u32mf2_tumu(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) { + return __riscv_vbrev8_v_u32mf2_tumu(vm, vd, vs2, vl); +} + +vuint32m1_t test_vbrev8_v_u32m1_tumu(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vbrev8_v_u32m1_tumu(vm, vd, vs2, vl); +} + +vuint32m2_t test_vbrev8_v_u32m2_tumu(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vbrev8_v_u32m2_tumu(vm, vd, vs2, vl); +} + +vuint32m4_t test_vbrev8_v_u32m4_tumu(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vbrev8_v_u32m4_tumu(vm, vd, vs2, vl); +} + +vuint32m8_t test_vbrev8_v_u32m8_tumu(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { + return __riscv_vbrev8_v_u32m8_tumu(vm, vd, vs2, vl); +} + +vuint64m1_t test_vbrev8_v_u64m1_tumu(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, size_t vl) { + return __riscv_vbrev8_v_u64m1_tumu(vm, vd, vs2, vl); +} + +vuint64m2_t test_vbrev8_v_u64m2_tumu(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, size_t vl) { + return __riscv_vbrev8_v_u64m2_tumu(vm, vd, vs2, vl); +} + +vuint64m4_t test_vbrev8_v_u64m4_tumu(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, size_t vl) { + return __riscv_vbrev8_v_u64m4_tumu(vm, vd, vs2, vl); +} + +vuint64m8_t test_vbrev8_v_u64m8_tumu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, size_t vl) { + return __riscv_vbrev8_v_u64m8_tumu(vm, vd, vs2, vl); +} + +vuint8mf8_t test_vbrev8_v_u8mf8_mu(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, size_t vl) { + return __riscv_vbrev8_v_u8mf8_mu(vm, vd, vs2, vl); +} + +vuint8mf4_t test_vbrev8_v_u8mf4_mu(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, size_t vl) { + return __riscv_vbrev8_v_u8mf4_mu(vm, vd, vs2, vl); +} + +vuint8mf2_t test_vbrev8_v_u8mf2_mu(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, size_t 
vl) {
+  return __riscv_vbrev8_v_u8mf2_mu(vm, vd, vs2, vl);
+}
+
+vuint8m1_t test_vbrev8_v_u8m1_mu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, size_t vl) {
+  return __riscv_vbrev8_v_u8m1_mu(vm, vd, vs2, vl);
+}
+
+vuint8m2_t test_vbrev8_v_u8m2_mu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, size_t vl) {
+  return __riscv_vbrev8_v_u8m2_mu(vm, vd, vs2, vl);
+}
+
+vuint8m4_t test_vbrev8_v_u8m4_mu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, size_t vl) {
+  return __riscv_vbrev8_v_u8m4_mu(vm, vd, vs2, vl);
+}
+
+vuint8m8_t test_vbrev8_v_u8m8_mu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, size_t vl) {
+  return __riscv_vbrev8_v_u8m8_mu(vm, vd, vs2, vl);
+}
+
+vuint16mf4_t test_vbrev8_v_u16mf4_mu(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, size_t vl) {
+  return __riscv_vbrev8_v_u16mf4_mu(vm, vd, vs2, vl);
+}
+
+vuint16mf2_t test_vbrev8_v_u16mf2_mu(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, size_t vl) {
+  return __riscv_vbrev8_v_u16mf2_mu(vm, vd, vs2, vl);
+}
+
+vuint16m1_t test_vbrev8_v_u16m1_mu(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, size_t vl) {
+  return __riscv_vbrev8_v_u16m1_mu(vm, vd, vs2, vl);
+}
+
+vuint16m2_t test_vbrev8_v_u16m2_mu(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, size_t vl) {
+  return __riscv_vbrev8_v_u16m2_mu(vm, vd, vs2, vl);
+}
+
+vuint16m4_t test_vbrev8_v_u16m4_mu(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, size_t vl) {
+  return __riscv_vbrev8_v_u16m4_mu(vm, vd, vs2, vl);
+}
+
+vuint16m8_t test_vbrev8_v_u16m8_mu(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, size_t vl) {
+  return __riscv_vbrev8_v_u16m8_mu(vm, vd, vs2, vl);
+}
+
+vuint32mf2_t test_vbrev8_v_u32mf2_mu(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) {
+  return __riscv_vbrev8_v_u32mf2_mu(vm, vd, vs2, vl);
+}
+
+vuint32m1_t test_vbrev8_v_u32m1_mu(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, size_t vl) {
+  return __riscv_vbrev8_v_u32m1_mu(vm, vd, vs2, vl);
+}
+
+vuint32m2_t test_vbrev8_v_u32m2_mu(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, size_t vl) {
+  return __riscv_vbrev8_v_u32m2_mu(vm, vd, vs2, vl);
+}
+
+vuint32m4_t test_vbrev8_v_u32m4_mu(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, size_t vl) {
+  return __riscv_vbrev8_v_u32m4_mu(vm, vd, vs2, vl);
+}
+
+vuint32m8_t test_vbrev8_v_u32m8_mu(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, size_t vl) {
+  return __riscv_vbrev8_v_u32m8_mu(vm, vd, vs2, vl);
+}
+
+vuint64m1_t test_vbrev8_v_u64m1_mu(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, size_t vl) {
+  return __riscv_vbrev8_v_u64m1_mu(vm, vd, vs2, vl);
+}
+
+vuint64m2_t test_vbrev8_v_u64m2_mu(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, size_t vl) {
+  return __riscv_vbrev8_v_u64m2_mu(vm, vd, vs2, vl);
+}
+
+vuint64m4_t test_vbrev8_v_u64m4_mu(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, size_t vl) {
+  return __riscv_vbrev8_v_u64m4_mu(vm, vd, vs2, vl);
+}
+
+vuint64m8_t test_vbrev8_v_u64m8_mu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, size_t vl) {
+  return __riscv_vbrev8_v_u64m8_mu(vm, vd, vs2, vl);
+}
diff --git a/auto-generated/vector-crypto/policy_funcs/api-testing/vclmul.c b/auto-generated/vector-crypto/policy_funcs/api-testing/vclmul.c
new file mode 100644
index 000000000..22f2b9b4b
--- /dev/null
+++ b/auto-generated/vector-crypto/policy_funcs/api-testing/vclmul.c
@@ -0,0 +1,130 @@
+#include <stdint.h>
+#include <riscv_vector.h>
+
+vuint64m1_t test_vclmul_vv_u64m1_tu(vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) {
+  return __riscv_vclmul_vv_u64m1_tu(vd, vs2, vs1, vl);
+}
+
+vuint64m1_t test_vclmul_vx_u64m1_tu(vuint64m1_t vd, vuint64m1_t vs2, uint64_t rs1, size_t vl) {
+  return
__riscv_vclmul_vx_u64m1_tu(vd, vs2, rs1, vl); +} + +vuint64m2_t test_vclmul_vv_u64m2_tu(vuint64m2_t vd, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { + return __riscv_vclmul_vv_u64m2_tu(vd, vs2, vs1, vl); +} + +vuint64m2_t test_vclmul_vx_u64m2_tu(vuint64m2_t vd, vuint64m2_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vclmul_vx_u64m2_tu(vd, vs2, rs1, vl); +} + +vuint64m4_t test_vclmul_vv_u64m4_tu(vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { + return __riscv_vclmul_vv_u64m4_tu(vd, vs2, vs1, vl); +} + +vuint64m4_t test_vclmul_vx_u64m4_tu(vuint64m4_t vd, vuint64m4_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vclmul_vx_u64m4_tu(vd, vs2, rs1, vl); +} + +vuint64m8_t test_vclmul_vv_u64m8_tu(vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { + return __riscv_vclmul_vv_u64m8_tu(vd, vs2, vs1, vl); +} + +vuint64m8_t test_vclmul_vx_u64m8_tu(vuint64m8_t vd, vuint64m8_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vclmul_vx_u64m8_tu(vd, vs2, rs1, vl); +} + +vuint64m1_t test_vclmul_vv_u64m1_tum(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vclmul_vv_u64m1_tum(vm, vd, vs2, vs1, vl); +} + +vuint64m1_t test_vclmul_vx_u64m1_tum(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vclmul_vx_u64m1_tum(vm, vd, vs2, rs1, vl); +} + +vuint64m2_t test_vclmul_vv_u64m2_tum(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { + return __riscv_vclmul_vv_u64m2_tum(vm, vd, vs2, vs1, vl); +} + +vuint64m2_t test_vclmul_vx_u64m2_tum(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vclmul_vx_u64m2_tum(vm, vd, vs2, rs1, vl); +} + +vuint64m4_t test_vclmul_vv_u64m4_tum(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { + return __riscv_vclmul_vv_u64m4_tum(vm, vd, vs2, vs1, vl); +} + +vuint64m4_t test_vclmul_vx_u64m4_tum(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vclmul_vx_u64m4_tum(vm, vd, vs2, rs1, vl); +} + +vuint64m8_t test_vclmul_vv_u64m8_tum(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { + return __riscv_vclmul_vv_u64m8_tum(vm, vd, vs2, vs1, vl); +} + +vuint64m8_t test_vclmul_vx_u64m8_tum(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vclmul_vx_u64m8_tum(vm, vd, vs2, rs1, vl); +} + +vuint64m1_t test_vclmul_vv_u64m1_tumu(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vclmul_vv_u64m1_tumu(vm, vd, vs2, vs1, vl); +} + +vuint64m1_t test_vclmul_vx_u64m1_tumu(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vclmul_vx_u64m1_tumu(vm, vd, vs2, rs1, vl); +} + +vuint64m2_t test_vclmul_vv_u64m2_tumu(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { + return __riscv_vclmul_vv_u64m2_tumu(vm, vd, vs2, vs1, vl); +} + +vuint64m2_t test_vclmul_vx_u64m2_tumu(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vclmul_vx_u64m2_tumu(vm, vd, vs2, rs1, vl); +} + +vuint64m4_t test_vclmul_vv_u64m4_tumu(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { + return __riscv_vclmul_vv_u64m4_tumu(vm, vd, vs2, vs1, vl); +} + +vuint64m4_t test_vclmul_vx_u64m4_tumu(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vclmul_vx_u64m4_tumu(vm, vd, vs2, rs1, vl); +} + +vuint64m8_t 
test_vclmul_vv_u64m8_tumu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) {
+  return __riscv_vclmul_vv_u64m8_tumu(vm, vd, vs2, vs1, vl);
+}
+
+vuint64m8_t test_vclmul_vx_u64m8_tumu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, uint64_t rs1, size_t vl) {
+  return __riscv_vclmul_vx_u64m8_tumu(vm, vd, vs2, rs1, vl);
+}
+
+vuint64m1_t test_vclmul_vv_u64m1_mu(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) {
+  return __riscv_vclmul_vv_u64m1_mu(vm, vd, vs2, vs1, vl);
+}
+
+vuint64m1_t test_vclmul_vx_u64m1_mu(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, uint64_t rs1, size_t vl) {
+  return __riscv_vclmul_vx_u64m1_mu(vm, vd, vs2, rs1, vl);
+}
+
+vuint64m2_t test_vclmul_vv_u64m2_mu(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) {
+  return __riscv_vclmul_vv_u64m2_mu(vm, vd, vs2, vs1, vl);
+}
+
+vuint64m2_t test_vclmul_vx_u64m2_mu(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, uint64_t rs1, size_t vl) {
+  return __riscv_vclmul_vx_u64m2_mu(vm, vd, vs2, rs1, vl);
+}
+
+vuint64m4_t test_vclmul_vv_u64m4_mu(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) {
+  return __riscv_vclmul_vv_u64m4_mu(vm, vd, vs2, vs1, vl);
+}
+
+vuint64m4_t test_vclmul_vx_u64m4_mu(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, uint64_t rs1, size_t vl) {
+  return __riscv_vclmul_vx_u64m4_mu(vm, vd, vs2, rs1, vl);
+}
+
+vuint64m8_t test_vclmul_vv_u64m8_mu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) {
+  return __riscv_vclmul_vv_u64m8_mu(vm, vd, vs2, vs1, vl);
+}
+
+vuint64m8_t test_vclmul_vx_u64m8_mu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, uint64_t rs1, size_t vl) {
+  return __riscv_vclmul_vx_u64m8_mu(vm, vd, vs2, rs1, vl);
+}
diff --git a/auto-generated/vector-crypto/policy_funcs/api-testing/vclmulh.c b/auto-generated/vector-crypto/policy_funcs/api-testing/vclmulh.c
new file mode 100644
index 000000000..a43662a06
--- /dev/null
+++ b/auto-generated/vector-crypto/policy_funcs/api-testing/vclmulh.c
@@ -0,0 +1,130 @@
+#include <stdint.h>
+#include <riscv_vector.h>
+
+vuint64m1_t test_vclmulh_vv_u64m1_tu(vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) {
+  return __riscv_vclmulh_vv_u64m1_tu(vd, vs2, vs1, vl);
+}
+
+vuint64m1_t test_vclmulh_vx_u64m1_tu(vuint64m1_t vd, vuint64m1_t vs2, uint64_t rs1, size_t vl) {
+  return __riscv_vclmulh_vx_u64m1_tu(vd, vs2, rs1, vl);
+}
+
+vuint64m2_t test_vclmulh_vv_u64m2_tu(vuint64m2_t vd, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) {
+  return __riscv_vclmulh_vv_u64m2_tu(vd, vs2, vs1, vl);
+}
+
+vuint64m2_t test_vclmulh_vx_u64m2_tu(vuint64m2_t vd, vuint64m2_t vs2, uint64_t rs1, size_t vl) {
+  return __riscv_vclmulh_vx_u64m2_tu(vd, vs2, rs1, vl);
+}
+
+vuint64m4_t test_vclmulh_vv_u64m4_tu(vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) {
+  return __riscv_vclmulh_vv_u64m4_tu(vd, vs2, vs1, vl);
+}
+
+vuint64m4_t test_vclmulh_vx_u64m4_tu(vuint64m4_t vd, vuint64m4_t vs2, uint64_t rs1, size_t vl) {
+  return __riscv_vclmulh_vx_u64m4_tu(vd, vs2, rs1, vl);
+}
+
+vuint64m8_t test_vclmulh_vv_u64m8_tu(vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) {
+  return __riscv_vclmulh_vv_u64m8_tu(vd, vs2, vs1, vl);
+}
+
+vuint64m8_t test_vclmulh_vx_u64m8_tu(vuint64m8_t vd, vuint64m8_t vs2, uint64_t rs1, size_t vl) {
+  return __riscv_vclmulh_vx_u64m8_tu(vd, vs2, rs1, vl);
+}
+
+vuint64m1_t test_vclmulh_vv_u64m1_tum(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) {
+  return __riscv_vclmulh_vv_u64m1_tum(vm, vd, vs2, vs1, vl);
+}
+
+vuint64m1_t
test_vclmulh_vx_u64m1_tum(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vclmulh_vx_u64m1_tum(vm, vd, vs2, rs1, vl); +} + +vuint64m2_t test_vclmulh_vv_u64m2_tum(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { + return __riscv_vclmulh_vv_u64m2_tum(vm, vd, vs2, vs1, vl); +} + +vuint64m2_t test_vclmulh_vx_u64m2_tum(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vclmulh_vx_u64m2_tum(vm, vd, vs2, rs1, vl); +} + +vuint64m4_t test_vclmulh_vv_u64m4_tum(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { + return __riscv_vclmulh_vv_u64m4_tum(vm, vd, vs2, vs1, vl); +} + +vuint64m4_t test_vclmulh_vx_u64m4_tum(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vclmulh_vx_u64m4_tum(vm, vd, vs2, rs1, vl); +} + +vuint64m8_t test_vclmulh_vv_u64m8_tum(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { + return __riscv_vclmulh_vv_u64m8_tum(vm, vd, vs2, vs1, vl); +} + +vuint64m8_t test_vclmulh_vx_u64m8_tum(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vclmulh_vx_u64m8_tum(vm, vd, vs2, rs1, vl); +} + +vuint64m1_t test_vclmulh_vv_u64m1_tumu(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vclmulh_vv_u64m1_tumu(vm, vd, vs2, vs1, vl); +} + +vuint64m1_t test_vclmulh_vx_u64m1_tumu(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vclmulh_vx_u64m1_tumu(vm, vd, vs2, rs1, vl); +} + +vuint64m2_t test_vclmulh_vv_u64m2_tumu(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { + return __riscv_vclmulh_vv_u64m2_tumu(vm, vd, vs2, vs1, vl); +} + +vuint64m2_t test_vclmulh_vx_u64m2_tumu(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vclmulh_vx_u64m2_tumu(vm, vd, vs2, rs1, vl); +} + +vuint64m4_t test_vclmulh_vv_u64m4_tumu(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { + return __riscv_vclmulh_vv_u64m4_tumu(vm, vd, vs2, vs1, vl); +} + +vuint64m4_t test_vclmulh_vx_u64m4_tumu(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vclmulh_vx_u64m4_tumu(vm, vd, vs2, rs1, vl); +} + +vuint64m8_t test_vclmulh_vv_u64m8_tumu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { + return __riscv_vclmulh_vv_u64m8_tumu(vm, vd, vs2, vs1, vl); +} + +vuint64m8_t test_vclmulh_vx_u64m8_tumu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vclmulh_vx_u64m8_tumu(vm, vd, vs2, rs1, vl); +} + +vuint64m1_t test_vclmulh_vv_u64m1_mu(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vclmulh_vv_u64m1_mu(vm, vd, vs2, vs1, vl); +} + +vuint64m1_t test_vclmulh_vx_u64m1_mu(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vclmulh_vx_u64m1_mu(vm, vd, vs2, rs1, vl); +} + +vuint64m2_t test_vclmulh_vv_u64m2_mu(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { + return __riscv_vclmulh_vv_u64m2_mu(vm, vd, vs2, vs1, vl); +} + +vuint64m2_t test_vclmulh_vx_u64m2_mu(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vclmulh_vx_u64m2_mu(vm, vd, vs2, rs1, vl); +} + +vuint64m4_t test_vclmulh_vv_u64m4_mu(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { + return 
__riscv_vclmulh_vv_u64m4_mu(vm, vd, vs2, vs1, vl);
+}
+
+vuint64m4_t test_vclmulh_vx_u64m4_mu(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, uint64_t rs1, size_t vl) {
+  return __riscv_vclmulh_vx_u64m4_mu(vm, vd, vs2, rs1, vl);
+}
+
+vuint64m8_t test_vclmulh_vv_u64m8_mu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) {
+  return __riscv_vclmulh_vv_u64m8_mu(vm, vd, vs2, vs1, vl);
+}
+
+vuint64m8_t test_vclmulh_vx_u64m8_mu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, uint64_t rs1, size_t vl) {
+  return __riscv_vclmulh_vx_u64m8_mu(vm, vd, vs2, rs1, vl);
+}
diff --git a/auto-generated/vector-crypto/policy_funcs/api-testing/vclz.c b/auto-generated/vector-crypto/policy_funcs/api-testing/vclz.c
new file mode 100644
index 000000000..6e3e1120f
--- /dev/null
+++ b/auto-generated/vector-crypto/policy_funcs/api-testing/vclz.c
@@ -0,0 +1,354 @@
+#include <stdint.h>
+#include <riscv_vector.h>
+
+vuint8mf8_t test_vclz_v_u8mf8_tu(vuint8mf8_t vd, vuint8mf8_t vs2, size_t vl) {
+  return __riscv_vclz_v_u8mf8_tu(vd, vs2, vl);
+}
+
+vuint8mf4_t test_vclz_v_u8mf4_tu(vuint8mf4_t vd, vuint8mf4_t vs2, size_t vl) {
+  return __riscv_vclz_v_u8mf4_tu(vd, vs2, vl);
+}
+
+vuint8mf2_t test_vclz_v_u8mf2_tu(vuint8mf2_t vd, vuint8mf2_t vs2, size_t vl) {
+  return __riscv_vclz_v_u8mf2_tu(vd, vs2, vl);
+}
+
+vuint8m1_t test_vclz_v_u8m1_tu(vuint8m1_t vd, vuint8m1_t vs2, size_t vl) {
+  return __riscv_vclz_v_u8m1_tu(vd, vs2, vl);
+}
+
+vuint8m2_t test_vclz_v_u8m2_tu(vuint8m2_t vd, vuint8m2_t vs2, size_t vl) {
+  return __riscv_vclz_v_u8m2_tu(vd, vs2, vl);
+}
+
+vuint8m4_t test_vclz_v_u8m4_tu(vuint8m4_t vd, vuint8m4_t vs2, size_t vl) {
+  return __riscv_vclz_v_u8m4_tu(vd, vs2, vl);
+}
+
+vuint8m8_t test_vclz_v_u8m8_tu(vuint8m8_t vd, vuint8m8_t vs2, size_t vl) {
+  return __riscv_vclz_v_u8m8_tu(vd, vs2, vl);
+}
+
+vuint16mf4_t test_vclz_v_u16mf4_tu(vuint16mf4_t vd, vuint16mf4_t vs2, size_t vl) {
+  return __riscv_vclz_v_u16mf4_tu(vd, vs2, vl);
+}
+
+vuint16mf2_t test_vclz_v_u16mf2_tu(vuint16mf2_t vd, vuint16mf2_t vs2, size_t vl) {
+  return __riscv_vclz_v_u16mf2_tu(vd, vs2, vl);
+}
+
+vuint16m1_t test_vclz_v_u16m1_tu(vuint16m1_t vd, vuint16m1_t vs2, size_t vl) {
+  return __riscv_vclz_v_u16m1_tu(vd, vs2, vl);
+}
+
+vuint16m2_t test_vclz_v_u16m2_tu(vuint16m2_t vd, vuint16m2_t vs2, size_t vl) {
+  return __riscv_vclz_v_u16m2_tu(vd, vs2, vl);
+}
+
+vuint16m4_t test_vclz_v_u16m4_tu(vuint16m4_t vd, vuint16m4_t vs2, size_t vl) {
+  return __riscv_vclz_v_u16m4_tu(vd, vs2, vl);
+}
+
+vuint16m8_t test_vclz_v_u16m8_tu(vuint16m8_t vd, vuint16m8_t vs2, size_t vl) {
+  return __riscv_vclz_v_u16m8_tu(vd, vs2, vl);
+}
+
+vuint32mf2_t test_vclz_v_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) {
+  return __riscv_vclz_v_u32mf2_tu(vd, vs2, vl);
+}
+
+vuint32m1_t test_vclz_v_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) {
+  return __riscv_vclz_v_u32m1_tu(vd, vs2, vl);
+}
+
+vuint32m2_t test_vclz_v_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) {
+  return __riscv_vclz_v_u32m2_tu(vd, vs2, vl);
+}
+
+vuint32m4_t test_vclz_v_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) {
+  return __riscv_vclz_v_u32m4_tu(vd, vs2, vl);
+}
+
+vuint32m8_t test_vclz_v_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) {
+  return __riscv_vclz_v_u32m8_tu(vd, vs2, vl);
+}
+
+vuint64m1_t test_vclz_v_u64m1_tu(vuint64m1_t vd, vuint64m1_t vs2, size_t vl) {
+  return __riscv_vclz_v_u64m1_tu(vd, vs2, vl);
+}
+
+vuint64m2_t test_vclz_v_u64m2_tu(vuint64m2_t vd, vuint64m2_t vs2, size_t vl) {
+  return __riscv_vclz_v_u64m2_tu(vd, vs2, vl);
+}
+
+vuint64m4_t
test_vclz_v_u64m4_tu(vuint64m4_t vd, vuint64m4_t vs2, size_t vl) { + return __riscv_vclz_v_u64m4_tu(vd, vs2, vl); +} + +vuint64m8_t test_vclz_v_u64m8_tu(vuint64m8_t vd, vuint64m8_t vs2, size_t vl) { + return __riscv_vclz_v_u64m8_tu(vd, vs2, vl); +} + +vuint8mf8_t test_vclz_v_u8mf8_tum(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, size_t vl) { + return __riscv_vclz_v_u8mf8_tum(vm, vd, vs2, vl); +} + +vuint8mf4_t test_vclz_v_u8mf4_tum(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, size_t vl) { + return __riscv_vclz_v_u8mf4_tum(vm, vd, vs2, vl); +} + +vuint8mf2_t test_vclz_v_u8mf2_tum(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, size_t vl) { + return __riscv_vclz_v_u8mf2_tum(vm, vd, vs2, vl); +} + +vuint8m1_t test_vclz_v_u8m1_tum(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, size_t vl) { + return __riscv_vclz_v_u8m1_tum(vm, vd, vs2, vl); +} + +vuint8m2_t test_vclz_v_u8m2_tum(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, size_t vl) { + return __riscv_vclz_v_u8m2_tum(vm, vd, vs2, vl); +} + +vuint8m4_t test_vclz_v_u8m4_tum(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, size_t vl) { + return __riscv_vclz_v_u8m4_tum(vm, vd, vs2, vl); +} + +vuint8m8_t test_vclz_v_u8m8_tum(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, size_t vl) { + return __riscv_vclz_v_u8m8_tum(vm, vd, vs2, vl); +} + +vuint16mf4_t test_vclz_v_u16mf4_tum(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, size_t vl) { + return __riscv_vclz_v_u16mf4_tum(vm, vd, vs2, vl); +} + +vuint16mf2_t test_vclz_v_u16mf2_tum(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, size_t vl) { + return __riscv_vclz_v_u16mf2_tum(vm, vd, vs2, vl); +} + +vuint16m1_t test_vclz_v_u16m1_tum(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, size_t vl) { + return __riscv_vclz_v_u16m1_tum(vm, vd, vs2, vl); +} + +vuint16m2_t test_vclz_v_u16m2_tum(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, size_t vl) { + return __riscv_vclz_v_u16m2_tum(vm, vd, vs2, vl); +} + +vuint16m4_t test_vclz_v_u16m4_tum(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, size_t vl) { + return __riscv_vclz_v_u16m4_tum(vm, vd, vs2, vl); +} + +vuint16m8_t test_vclz_v_u16m8_tum(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, size_t vl) { + return __riscv_vclz_v_u16m8_tum(vm, vd, vs2, vl); +} + +vuint32mf2_t test_vclz_v_u32mf2_tum(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) { + return __riscv_vclz_v_u32mf2_tum(vm, vd, vs2, vl); +} + +vuint32m1_t test_vclz_v_u32m1_tum(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vclz_v_u32m1_tum(vm, vd, vs2, vl); +} + +vuint32m2_t test_vclz_v_u32m2_tum(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vclz_v_u32m2_tum(vm, vd, vs2, vl); +} + +vuint32m4_t test_vclz_v_u32m4_tum(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vclz_v_u32m4_tum(vm, vd, vs2, vl); +} + +vuint32m8_t test_vclz_v_u32m8_tum(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { + return __riscv_vclz_v_u32m8_tum(vm, vd, vs2, vl); +} + +vuint64m1_t test_vclz_v_u64m1_tum(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, size_t vl) { + return __riscv_vclz_v_u64m1_tum(vm, vd, vs2, vl); +} + +vuint64m2_t test_vclz_v_u64m2_tum(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, size_t vl) { + return __riscv_vclz_v_u64m2_tum(vm, vd, vs2, vl); +} + +vuint64m4_t test_vclz_v_u64m4_tum(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, size_t vl) { + return __riscv_vclz_v_u64m4_tum(vm, vd, vs2, vl); +} + +vuint64m8_t test_vclz_v_u64m8_tum(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, size_t vl) { + return 
__riscv_vclz_v_u64m8_tum(vm, vd, vs2, vl); +} + +vuint8mf8_t test_vclz_v_u8mf8_tumu(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, size_t vl) { + return __riscv_vclz_v_u8mf8_tumu(vm, vd, vs2, vl); +} + +vuint8mf4_t test_vclz_v_u8mf4_tumu(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, size_t vl) { + return __riscv_vclz_v_u8mf4_tumu(vm, vd, vs2, vl); +} + +vuint8mf2_t test_vclz_v_u8mf2_tumu(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, size_t vl) { + return __riscv_vclz_v_u8mf2_tumu(vm, vd, vs2, vl); +} + +vuint8m1_t test_vclz_v_u8m1_tumu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, size_t vl) { + return __riscv_vclz_v_u8m1_tumu(vm, vd, vs2, vl); +} + +vuint8m2_t test_vclz_v_u8m2_tumu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, size_t vl) { + return __riscv_vclz_v_u8m2_tumu(vm, vd, vs2, vl); +} + +vuint8m4_t test_vclz_v_u8m4_tumu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, size_t vl) { + return __riscv_vclz_v_u8m4_tumu(vm, vd, vs2, vl); +} + +vuint8m8_t test_vclz_v_u8m8_tumu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, size_t vl) { + return __riscv_vclz_v_u8m8_tumu(vm, vd, vs2, vl); +} + +vuint16mf4_t test_vclz_v_u16mf4_tumu(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, size_t vl) { + return __riscv_vclz_v_u16mf4_tumu(vm, vd, vs2, vl); +} + +vuint16mf2_t test_vclz_v_u16mf2_tumu(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, size_t vl) { + return __riscv_vclz_v_u16mf2_tumu(vm, vd, vs2, vl); +} + +vuint16m1_t test_vclz_v_u16m1_tumu(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, size_t vl) { + return __riscv_vclz_v_u16m1_tumu(vm, vd, vs2, vl); +} + +vuint16m2_t test_vclz_v_u16m2_tumu(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, size_t vl) { + return __riscv_vclz_v_u16m2_tumu(vm, vd, vs2, vl); +} + +vuint16m4_t test_vclz_v_u16m4_tumu(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, size_t vl) { + return __riscv_vclz_v_u16m4_tumu(vm, vd, vs2, vl); +} + +vuint16m8_t test_vclz_v_u16m8_tumu(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, size_t vl) { + return __riscv_vclz_v_u16m8_tumu(vm, vd, vs2, vl); +} + +vuint32mf2_t test_vclz_v_u32mf2_tumu(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) { + return __riscv_vclz_v_u32mf2_tumu(vm, vd, vs2, vl); +} + +vuint32m1_t test_vclz_v_u32m1_tumu(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vclz_v_u32m1_tumu(vm, vd, vs2, vl); +} + +vuint32m2_t test_vclz_v_u32m2_tumu(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vclz_v_u32m2_tumu(vm, vd, vs2, vl); +} + +vuint32m4_t test_vclz_v_u32m4_tumu(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vclz_v_u32m4_tumu(vm, vd, vs2, vl); +} + +vuint32m8_t test_vclz_v_u32m8_tumu(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { + return __riscv_vclz_v_u32m8_tumu(vm, vd, vs2, vl); +} + +vuint64m1_t test_vclz_v_u64m1_tumu(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, size_t vl) { + return __riscv_vclz_v_u64m1_tumu(vm, vd, vs2, vl); +} + +vuint64m2_t test_vclz_v_u64m2_tumu(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, size_t vl) { + return __riscv_vclz_v_u64m2_tumu(vm, vd, vs2, vl); +} + +vuint64m4_t test_vclz_v_u64m4_tumu(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, size_t vl) { + return __riscv_vclz_v_u64m4_tumu(vm, vd, vs2, vl); +} + +vuint64m8_t test_vclz_v_u64m8_tumu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, size_t vl) { + return __riscv_vclz_v_u64m8_tumu(vm, vd, vs2, vl); +} + +vuint8mf8_t test_vclz_v_u8mf8_mu(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, size_t vl) { + return __riscv_vclz_v_u8mf8_mu(vm, 
vd, vs2, vl);
+}
+
+vuint8mf4_t test_vclz_v_u8mf4_mu(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, size_t vl) {
+  return __riscv_vclz_v_u8mf4_mu(vm, vd, vs2, vl);
+}
+
+vuint8mf2_t test_vclz_v_u8mf2_mu(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, size_t vl) {
+  return __riscv_vclz_v_u8mf2_mu(vm, vd, vs2, vl);
+}
+
+vuint8m1_t test_vclz_v_u8m1_mu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, size_t vl) {
+  return __riscv_vclz_v_u8m1_mu(vm, vd, vs2, vl);
+}
+
+vuint8m2_t test_vclz_v_u8m2_mu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, size_t vl) {
+  return __riscv_vclz_v_u8m2_mu(vm, vd, vs2, vl);
+}
+
+vuint8m4_t test_vclz_v_u8m4_mu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, size_t vl) {
+  return __riscv_vclz_v_u8m4_mu(vm, vd, vs2, vl);
+}
+
+vuint8m8_t test_vclz_v_u8m8_mu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, size_t vl) {
+  return __riscv_vclz_v_u8m8_mu(vm, vd, vs2, vl);
+}
+
+vuint16mf4_t test_vclz_v_u16mf4_mu(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, size_t vl) {
+  return __riscv_vclz_v_u16mf4_mu(vm, vd, vs2, vl);
+}
+
+vuint16mf2_t test_vclz_v_u16mf2_mu(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, size_t vl) {
+  return __riscv_vclz_v_u16mf2_mu(vm, vd, vs2, vl);
+}
+
+vuint16m1_t test_vclz_v_u16m1_mu(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, size_t vl) {
+  return __riscv_vclz_v_u16m1_mu(vm, vd, vs2, vl);
+}
+
+vuint16m2_t test_vclz_v_u16m2_mu(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, size_t vl) {
+  return __riscv_vclz_v_u16m2_mu(vm, vd, vs2, vl);
+}
+
+vuint16m4_t test_vclz_v_u16m4_mu(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, size_t vl) {
+  return __riscv_vclz_v_u16m4_mu(vm, vd, vs2, vl);
+}
+
+vuint16m8_t test_vclz_v_u16m8_mu(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, size_t vl) {
+  return __riscv_vclz_v_u16m8_mu(vm, vd, vs2, vl);
+}
+
+vuint32mf2_t test_vclz_v_u32mf2_mu(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) {
+  return __riscv_vclz_v_u32mf2_mu(vm, vd, vs2, vl);
+}
+
+vuint32m1_t test_vclz_v_u32m1_mu(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, size_t vl) {
+  return __riscv_vclz_v_u32m1_mu(vm, vd, vs2, vl);
+}
+
+vuint32m2_t test_vclz_v_u32m2_mu(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, size_t vl) {
+  return __riscv_vclz_v_u32m2_mu(vm, vd, vs2, vl);
+}
+
+vuint32m4_t test_vclz_v_u32m4_mu(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, size_t vl) {
+  return __riscv_vclz_v_u32m4_mu(vm, vd, vs2, vl);
+}
+
+vuint32m8_t test_vclz_v_u32m8_mu(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, size_t vl) {
+  return __riscv_vclz_v_u32m8_mu(vm, vd, vs2, vl);
+}
+
+vuint64m1_t test_vclz_v_u64m1_mu(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, size_t vl) {
+  return __riscv_vclz_v_u64m1_mu(vm, vd, vs2, vl);
+}
+
+vuint64m2_t test_vclz_v_u64m2_mu(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, size_t vl) {
+  return __riscv_vclz_v_u64m2_mu(vm, vd, vs2, vl);
+}
+
+vuint64m4_t test_vclz_v_u64m4_mu(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, size_t vl) {
+  return __riscv_vclz_v_u64m4_mu(vm, vd, vs2, vl);
+}
+
+vuint64m8_t test_vclz_v_u64m8_mu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, size_t vl) {
+  return __riscv_vclz_v_u64m8_mu(vm, vd, vs2, vl);
+}
diff --git a/auto-generated/vector-crypto/policy_funcs/api-testing/vcpop.c b/auto-generated/vector-crypto/policy_funcs/api-testing/vcpop.c
new file mode 100644
index 000000000..7dbb9b78c
--- /dev/null
+++ b/auto-generated/vector-crypto/policy_funcs/api-testing/vcpop.c
@@ -0,0 +1,354 @@
+#include <stdint.h>
+#include <riscv_vector.h>
+
+vuint8mf8_t test_vcpop_v_u8mf8_tu(vuint8mf8_t vd, vuint8mf8_t vs2, size_t vl) {
+  return
__riscv_vcpop_v_u8mf8_tu(vd, vs2, vl); +} + +vuint8mf4_t test_vcpop_v_u8mf4_tu(vuint8mf4_t vd, vuint8mf4_t vs2, size_t vl) { + return __riscv_vcpop_v_u8mf4_tu(vd, vs2, vl); +} + +vuint8mf2_t test_vcpop_v_u8mf2_tu(vuint8mf2_t vd, vuint8mf2_t vs2, size_t vl) { + return __riscv_vcpop_v_u8mf2_tu(vd, vs2, vl); +} + +vuint8m1_t test_vcpop_v_u8m1_tu(vuint8m1_t vd, vuint8m1_t vs2, size_t vl) { + return __riscv_vcpop_v_u8m1_tu(vd, vs2, vl); +} + +vuint8m2_t test_vcpop_v_u8m2_tu(vuint8m2_t vd, vuint8m2_t vs2, size_t vl) { + return __riscv_vcpop_v_u8m2_tu(vd, vs2, vl); +} + +vuint8m4_t test_vcpop_v_u8m4_tu(vuint8m4_t vd, vuint8m4_t vs2, size_t vl) { + return __riscv_vcpop_v_u8m4_tu(vd, vs2, vl); +} + +vuint8m8_t test_vcpop_v_u8m8_tu(vuint8m8_t vd, vuint8m8_t vs2, size_t vl) { + return __riscv_vcpop_v_u8m8_tu(vd, vs2, vl); +} + +vuint16mf4_t test_vcpop_v_u16mf4_tu(vuint16mf4_t vd, vuint16mf4_t vs2, size_t vl) { + return __riscv_vcpop_v_u16mf4_tu(vd, vs2, vl); +} + +vuint16mf2_t test_vcpop_v_u16mf2_tu(vuint16mf2_t vd, vuint16mf2_t vs2, size_t vl) { + return __riscv_vcpop_v_u16mf2_tu(vd, vs2, vl); +} + +vuint16m1_t test_vcpop_v_u16m1_tu(vuint16m1_t vd, vuint16m1_t vs2, size_t vl) { + return __riscv_vcpop_v_u16m1_tu(vd, vs2, vl); +} + +vuint16m2_t test_vcpop_v_u16m2_tu(vuint16m2_t vd, vuint16m2_t vs2, size_t vl) { + return __riscv_vcpop_v_u16m2_tu(vd, vs2, vl); +} + +vuint16m4_t test_vcpop_v_u16m4_tu(vuint16m4_t vd, vuint16m4_t vs2, size_t vl) { + return __riscv_vcpop_v_u16m4_tu(vd, vs2, vl); +} + +vuint16m8_t test_vcpop_v_u16m8_tu(vuint16m8_t vd, vuint16m8_t vs2, size_t vl) { + return __riscv_vcpop_v_u16m8_tu(vd, vs2, vl); +} + +vuint32mf2_t test_vcpop_v_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) { + return __riscv_vcpop_v_u32mf2_tu(vd, vs2, vl); +} + +vuint32m1_t test_vcpop_v_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vcpop_v_u32m1_tu(vd, vs2, vl); +} + +vuint32m2_t test_vcpop_v_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vcpop_v_u32m2_tu(vd, vs2, vl); +} + +vuint32m4_t test_vcpop_v_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vcpop_v_u32m4_tu(vd, vs2, vl); +} + +vuint32m8_t test_vcpop_v_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { + return __riscv_vcpop_v_u32m8_tu(vd, vs2, vl); +} + +vuint64m1_t test_vcpop_v_u64m1_tu(vuint64m1_t vd, vuint64m1_t vs2, size_t vl) { + return __riscv_vcpop_v_u64m1_tu(vd, vs2, vl); +} + +vuint64m2_t test_vcpop_v_u64m2_tu(vuint64m2_t vd, vuint64m2_t vs2, size_t vl) { + return __riscv_vcpop_v_u64m2_tu(vd, vs2, vl); +} + +vuint64m4_t test_vcpop_v_u64m4_tu(vuint64m4_t vd, vuint64m4_t vs2, size_t vl) { + return __riscv_vcpop_v_u64m4_tu(vd, vs2, vl); +} + +vuint64m8_t test_vcpop_v_u64m8_tu(vuint64m8_t vd, vuint64m8_t vs2, size_t vl) { + return __riscv_vcpop_v_u64m8_tu(vd, vs2, vl); +} + +vuint8mf8_t test_vcpop_v_u8mf8_tum(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, size_t vl) { + return __riscv_vcpop_v_u8mf8_tum(vm, vd, vs2, vl); +} + +vuint8mf4_t test_vcpop_v_u8mf4_tum(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, size_t vl) { + return __riscv_vcpop_v_u8mf4_tum(vm, vd, vs2, vl); +} + +vuint8mf2_t test_vcpop_v_u8mf2_tum(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, size_t vl) { + return __riscv_vcpop_v_u8mf2_tum(vm, vd, vs2, vl); +} + +vuint8m1_t test_vcpop_v_u8m1_tum(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, size_t vl) { + return __riscv_vcpop_v_u8m1_tum(vm, vd, vs2, vl); +} + +vuint8m2_t test_vcpop_v_u8m2_tum(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, size_t vl) { 
+ return __riscv_vcpop_v_u8m2_tum(vm, vd, vs2, vl); +} + +vuint8m4_t test_vcpop_v_u8m4_tum(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, size_t vl) { + return __riscv_vcpop_v_u8m4_tum(vm, vd, vs2, vl); +} + +vuint8m8_t test_vcpop_v_u8m8_tum(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, size_t vl) { + return __riscv_vcpop_v_u8m8_tum(vm, vd, vs2, vl); +} + +vuint16mf4_t test_vcpop_v_u16mf4_tum(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, size_t vl) { + return __riscv_vcpop_v_u16mf4_tum(vm, vd, vs2, vl); +} + +vuint16mf2_t test_vcpop_v_u16mf2_tum(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, size_t vl) { + return __riscv_vcpop_v_u16mf2_tum(vm, vd, vs2, vl); +} + +vuint16m1_t test_vcpop_v_u16m1_tum(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, size_t vl) { + return __riscv_vcpop_v_u16m1_tum(vm, vd, vs2, vl); +} + +vuint16m2_t test_vcpop_v_u16m2_tum(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, size_t vl) { + return __riscv_vcpop_v_u16m2_tum(vm, vd, vs2, vl); +} + +vuint16m4_t test_vcpop_v_u16m4_tum(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, size_t vl) { + return __riscv_vcpop_v_u16m4_tum(vm, vd, vs2, vl); +} + +vuint16m8_t test_vcpop_v_u16m8_tum(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, size_t vl) { + return __riscv_vcpop_v_u16m8_tum(vm, vd, vs2, vl); +} + +vuint32mf2_t test_vcpop_v_u32mf2_tum(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) { + return __riscv_vcpop_v_u32mf2_tum(vm, vd, vs2, vl); +} + +vuint32m1_t test_vcpop_v_u32m1_tum(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vcpop_v_u32m1_tum(vm, vd, vs2, vl); +} + +vuint32m2_t test_vcpop_v_u32m2_tum(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vcpop_v_u32m2_tum(vm, vd, vs2, vl); +} + +vuint32m4_t test_vcpop_v_u32m4_tum(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vcpop_v_u32m4_tum(vm, vd, vs2, vl); +} + +vuint32m8_t test_vcpop_v_u32m8_tum(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { + return __riscv_vcpop_v_u32m8_tum(vm, vd, vs2, vl); +} + +vuint64m1_t test_vcpop_v_u64m1_tum(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, size_t vl) { + return __riscv_vcpop_v_u64m1_tum(vm, vd, vs2, vl); +} + +vuint64m2_t test_vcpop_v_u64m2_tum(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, size_t vl) { + return __riscv_vcpop_v_u64m2_tum(vm, vd, vs2, vl); +} + +vuint64m4_t test_vcpop_v_u64m4_tum(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, size_t vl) { + return __riscv_vcpop_v_u64m4_tum(vm, vd, vs2, vl); +} + +vuint64m8_t test_vcpop_v_u64m8_tum(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, size_t vl) { + return __riscv_vcpop_v_u64m8_tum(vm, vd, vs2, vl); +} + +vuint8mf8_t test_vcpop_v_u8mf8_tumu(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, size_t vl) { + return __riscv_vcpop_v_u8mf8_tumu(vm, vd, vs2, vl); +} + +vuint8mf4_t test_vcpop_v_u8mf4_tumu(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, size_t vl) { + return __riscv_vcpop_v_u8mf4_tumu(vm, vd, vs2, vl); +} + +vuint8mf2_t test_vcpop_v_u8mf2_tumu(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, size_t vl) { + return __riscv_vcpop_v_u8mf2_tumu(vm, vd, vs2, vl); +} + +vuint8m1_t test_vcpop_v_u8m1_tumu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, size_t vl) { + return __riscv_vcpop_v_u8m1_tumu(vm, vd, vs2, vl); +} + +vuint8m2_t test_vcpop_v_u8m2_tumu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, size_t vl) { + return __riscv_vcpop_v_u8m2_tumu(vm, vd, vs2, vl); +} + +vuint8m4_t test_vcpop_v_u8m4_tumu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, size_t vl) { + return 
__riscv_vcpop_v_u8m4_tumu(vm, vd, vs2, vl); +} + +vuint8m8_t test_vcpop_v_u8m8_tumu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, size_t vl) { + return __riscv_vcpop_v_u8m8_tumu(vm, vd, vs2, vl); +} + +vuint16mf4_t test_vcpop_v_u16mf4_tumu(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, size_t vl) { + return __riscv_vcpop_v_u16mf4_tumu(vm, vd, vs2, vl); +} + +vuint16mf2_t test_vcpop_v_u16mf2_tumu(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, size_t vl) { + return __riscv_vcpop_v_u16mf2_tumu(vm, vd, vs2, vl); +} + +vuint16m1_t test_vcpop_v_u16m1_tumu(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, size_t vl) { + return __riscv_vcpop_v_u16m1_tumu(vm, vd, vs2, vl); +} + +vuint16m2_t test_vcpop_v_u16m2_tumu(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, size_t vl) { + return __riscv_vcpop_v_u16m2_tumu(vm, vd, vs2, vl); +} + +vuint16m4_t test_vcpop_v_u16m4_tumu(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, size_t vl) { + return __riscv_vcpop_v_u16m4_tumu(vm, vd, vs2, vl); +} + +vuint16m8_t test_vcpop_v_u16m8_tumu(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, size_t vl) { + return __riscv_vcpop_v_u16m8_tumu(vm, vd, vs2, vl); +} + +vuint32mf2_t test_vcpop_v_u32mf2_tumu(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) { + return __riscv_vcpop_v_u32mf2_tumu(vm, vd, vs2, vl); +} + +vuint32m1_t test_vcpop_v_u32m1_tumu(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vcpop_v_u32m1_tumu(vm, vd, vs2, vl); +} + +vuint32m2_t test_vcpop_v_u32m2_tumu(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vcpop_v_u32m2_tumu(vm, vd, vs2, vl); +} + +vuint32m4_t test_vcpop_v_u32m4_tumu(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vcpop_v_u32m4_tumu(vm, vd, vs2, vl); +} + +vuint32m8_t test_vcpop_v_u32m8_tumu(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { + return __riscv_vcpop_v_u32m8_tumu(vm, vd, vs2, vl); +} + +vuint64m1_t test_vcpop_v_u64m1_tumu(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, size_t vl) { + return __riscv_vcpop_v_u64m1_tumu(vm, vd, vs2, vl); +} + +vuint64m2_t test_vcpop_v_u64m2_tumu(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, size_t vl) { + return __riscv_vcpop_v_u64m2_tumu(vm, vd, vs2, vl); +} + +vuint64m4_t test_vcpop_v_u64m4_tumu(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, size_t vl) { + return __riscv_vcpop_v_u64m4_tumu(vm, vd, vs2, vl); +} + +vuint64m8_t test_vcpop_v_u64m8_tumu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, size_t vl) { + return __riscv_vcpop_v_u64m8_tumu(vm, vd, vs2, vl); +} + +vuint8mf8_t test_vcpop_v_u8mf8_mu(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, size_t vl) { + return __riscv_vcpop_v_u8mf8_mu(vm, vd, vs2, vl); +} + +vuint8mf4_t test_vcpop_v_u8mf4_mu(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, size_t vl) { + return __riscv_vcpop_v_u8mf4_mu(vm, vd, vs2, vl); +} + +vuint8mf2_t test_vcpop_v_u8mf2_mu(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, size_t vl) { + return __riscv_vcpop_v_u8mf2_mu(vm, vd, vs2, vl); +} + +vuint8m1_t test_vcpop_v_u8m1_mu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, size_t vl) { + return __riscv_vcpop_v_u8m1_mu(vm, vd, vs2, vl); +} + +vuint8m2_t test_vcpop_v_u8m2_mu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, size_t vl) { + return __riscv_vcpop_v_u8m2_mu(vm, vd, vs2, vl); +} + +vuint8m4_t test_vcpop_v_u8m4_mu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, size_t vl) { + return __riscv_vcpop_v_u8m4_mu(vm, vd, vs2, vl); +} + +vuint8m8_t test_vcpop_v_u8m8_mu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, size_t vl) { + return 
__riscv_vcpop_v_u8m8_mu(vm, vd, vs2, vl);
+}
+
+vuint16mf4_t test_vcpop_v_u16mf4_mu(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, size_t vl) {
+  return __riscv_vcpop_v_u16mf4_mu(vm, vd, vs2, vl);
+}
+
+vuint16mf2_t test_vcpop_v_u16mf2_mu(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, size_t vl) {
+  return __riscv_vcpop_v_u16mf2_mu(vm, vd, vs2, vl);
+}
+
+vuint16m1_t test_vcpop_v_u16m1_mu(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, size_t vl) {
+  return __riscv_vcpop_v_u16m1_mu(vm, vd, vs2, vl);
+}
+
+vuint16m2_t test_vcpop_v_u16m2_mu(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, size_t vl) {
+  return __riscv_vcpop_v_u16m2_mu(vm, vd, vs2, vl);
+}
+
+vuint16m4_t test_vcpop_v_u16m4_mu(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, size_t vl) {
+  return __riscv_vcpop_v_u16m4_mu(vm, vd, vs2, vl);
+}
+
+vuint16m8_t test_vcpop_v_u16m8_mu(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, size_t vl) {
+  return __riscv_vcpop_v_u16m8_mu(vm, vd, vs2, vl);
+}
+
+vuint32mf2_t test_vcpop_v_u32mf2_mu(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) {
+  return __riscv_vcpop_v_u32mf2_mu(vm, vd, vs2, vl);
+}
+
+vuint32m1_t test_vcpop_v_u32m1_mu(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, size_t vl) {
+  return __riscv_vcpop_v_u32m1_mu(vm, vd, vs2, vl);
+}
+
+vuint32m2_t test_vcpop_v_u32m2_mu(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, size_t vl) {
+  return __riscv_vcpop_v_u32m2_mu(vm, vd, vs2, vl);
+}
+
+vuint32m4_t test_vcpop_v_u32m4_mu(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, size_t vl) {
+  return __riscv_vcpop_v_u32m4_mu(vm, vd, vs2, vl);
+}
+
+vuint32m8_t test_vcpop_v_u32m8_mu(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, size_t vl) {
+  return __riscv_vcpop_v_u32m8_mu(vm, vd, vs2, vl);
+}
+
+vuint64m1_t test_vcpop_v_u64m1_mu(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, size_t vl) {
+  return __riscv_vcpop_v_u64m1_mu(vm, vd, vs2, vl);
+}
+
+vuint64m2_t test_vcpop_v_u64m2_mu(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, size_t vl) {
+  return __riscv_vcpop_v_u64m2_mu(vm, vd, vs2, vl);
+}
+
+vuint64m4_t test_vcpop_v_u64m4_mu(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, size_t vl) {
+  return __riscv_vcpop_v_u64m4_mu(vm, vd, vs2, vl);
+}
+
+vuint64m8_t test_vcpop_v_u64m8_mu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, size_t vl) {
+  return __riscv_vcpop_v_u64m8_mu(vm, vd, vs2, vl);
+}
diff --git a/auto-generated/vector-crypto/policy_funcs/api-testing/vctz.c b/auto-generated/vector-crypto/policy_funcs/api-testing/vctz.c
new file mode 100644
index 000000000..b191067e8
--- /dev/null
+++ b/auto-generated/vector-crypto/policy_funcs/api-testing/vctz.c
@@ -0,0 +1,354 @@
+#include <stdint.h>
+#include <riscv_vector.h>
+
+vuint8mf8_t test_vctz_v_u8mf8_tu(vuint8mf8_t vd, vuint8mf8_t vs2, size_t vl) {
+  return __riscv_vctz_v_u8mf8_tu(vd, vs2, vl);
+}
+
+vuint8mf4_t test_vctz_v_u8mf4_tu(vuint8mf4_t vd, vuint8mf4_t vs2, size_t vl) {
+  return __riscv_vctz_v_u8mf4_tu(vd, vs2, vl);
+}
+
+vuint8mf2_t test_vctz_v_u8mf2_tu(vuint8mf2_t vd, vuint8mf2_t vs2, size_t vl) {
+  return __riscv_vctz_v_u8mf2_tu(vd, vs2, vl);
+}
+
+vuint8m1_t test_vctz_v_u8m1_tu(vuint8m1_t vd, vuint8m1_t vs2, size_t vl) {
+  return __riscv_vctz_v_u8m1_tu(vd, vs2, vl);
+}
+
+vuint8m2_t test_vctz_v_u8m2_tu(vuint8m2_t vd, vuint8m2_t vs2, size_t vl) {
+  return __riscv_vctz_v_u8m2_tu(vd, vs2, vl);
+}
+
+vuint8m4_t test_vctz_v_u8m4_tu(vuint8m4_t vd, vuint8m4_t vs2, size_t vl) {
+  return __riscv_vctz_v_u8m4_tu(vd, vs2, vl);
+}
+
+vuint8m8_t test_vctz_v_u8m8_tu(vuint8m8_t vd, vuint8m8_t vs2, size_t vl) {
+  return __riscv_vctz_v_u8m8_tu(vd, vs2, vl);
+}
+
+vuint16mf4_t test_vctz_v_u16mf4_tu(vuint16mf4_t vd, vuint16mf4_t vs2, size_t vl) { + return __riscv_vctz_v_u16mf4_tu(vd, vs2, vl); +} + +vuint16mf2_t test_vctz_v_u16mf2_tu(vuint16mf2_t vd, vuint16mf2_t vs2, size_t vl) { + return __riscv_vctz_v_u16mf2_tu(vd, vs2, vl); +} + +vuint16m1_t test_vctz_v_u16m1_tu(vuint16m1_t vd, vuint16m1_t vs2, size_t vl) { + return __riscv_vctz_v_u16m1_tu(vd, vs2, vl); +} + +vuint16m2_t test_vctz_v_u16m2_tu(vuint16m2_t vd, vuint16m2_t vs2, size_t vl) { + return __riscv_vctz_v_u16m2_tu(vd, vs2, vl); +} + +vuint16m4_t test_vctz_v_u16m4_tu(vuint16m4_t vd, vuint16m4_t vs2, size_t vl) { + return __riscv_vctz_v_u16m4_tu(vd, vs2, vl); +} + +vuint16m8_t test_vctz_v_u16m8_tu(vuint16m8_t vd, vuint16m8_t vs2, size_t vl) { + return __riscv_vctz_v_u16m8_tu(vd, vs2, vl); +} + +vuint32mf2_t test_vctz_v_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) { + return __riscv_vctz_v_u32mf2_tu(vd, vs2, vl); +} + +vuint32m1_t test_vctz_v_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vctz_v_u32m1_tu(vd, vs2, vl); +} + +vuint32m2_t test_vctz_v_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vctz_v_u32m2_tu(vd, vs2, vl); +} + +vuint32m4_t test_vctz_v_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vctz_v_u32m4_tu(vd, vs2, vl); +} + +vuint32m8_t test_vctz_v_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { + return __riscv_vctz_v_u32m8_tu(vd, vs2, vl); +} + +vuint64m1_t test_vctz_v_u64m1_tu(vuint64m1_t vd, vuint64m1_t vs2, size_t vl) { + return __riscv_vctz_v_u64m1_tu(vd, vs2, vl); +} + +vuint64m2_t test_vctz_v_u64m2_tu(vuint64m2_t vd, vuint64m2_t vs2, size_t vl) { + return __riscv_vctz_v_u64m2_tu(vd, vs2, vl); +} + +vuint64m4_t test_vctz_v_u64m4_tu(vuint64m4_t vd, vuint64m4_t vs2, size_t vl) { + return __riscv_vctz_v_u64m4_tu(vd, vs2, vl); +} + +vuint64m8_t test_vctz_v_u64m8_tu(vuint64m8_t vd, vuint64m8_t vs2, size_t vl) { + return __riscv_vctz_v_u64m8_tu(vd, vs2, vl); +} + +vuint8mf8_t test_vctz_v_u8mf8_tum(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, size_t vl) { + return __riscv_vctz_v_u8mf8_tum(vm, vd, vs2, vl); +} + +vuint8mf4_t test_vctz_v_u8mf4_tum(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, size_t vl) { + return __riscv_vctz_v_u8mf4_tum(vm, vd, vs2, vl); +} + +vuint8mf2_t test_vctz_v_u8mf2_tum(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, size_t vl) { + return __riscv_vctz_v_u8mf2_tum(vm, vd, vs2, vl); +} + +vuint8m1_t test_vctz_v_u8m1_tum(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, size_t vl) { + return __riscv_vctz_v_u8m1_tum(vm, vd, vs2, vl); +} + +vuint8m2_t test_vctz_v_u8m2_tum(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, size_t vl) { + return __riscv_vctz_v_u8m2_tum(vm, vd, vs2, vl); +} + +vuint8m4_t test_vctz_v_u8m4_tum(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, size_t vl) { + return __riscv_vctz_v_u8m4_tum(vm, vd, vs2, vl); +} + +vuint8m8_t test_vctz_v_u8m8_tum(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, size_t vl) { + return __riscv_vctz_v_u8m8_tum(vm, vd, vs2, vl); +} + +vuint16mf4_t test_vctz_v_u16mf4_tum(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, size_t vl) { + return __riscv_vctz_v_u16mf4_tum(vm, vd, vs2, vl); +} + +vuint16mf2_t test_vctz_v_u16mf2_tum(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, size_t vl) { + return __riscv_vctz_v_u16mf2_tum(vm, vd, vs2, vl); +} + +vuint16m1_t test_vctz_v_u16m1_tum(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, size_t vl) { + return __riscv_vctz_v_u16m1_tum(vm, vd, vs2, vl); +} + +vuint16m2_t test_vctz_v_u16m2_tum(vbool8_t vm, 
vuint16m2_t vd, vuint16m2_t vs2, size_t vl) { + return __riscv_vctz_v_u16m2_tum(vm, vd, vs2, vl); +} + +vuint16m4_t test_vctz_v_u16m4_tum(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, size_t vl) { + return __riscv_vctz_v_u16m4_tum(vm, vd, vs2, vl); +} + +vuint16m8_t test_vctz_v_u16m8_tum(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, size_t vl) { + return __riscv_vctz_v_u16m8_tum(vm, vd, vs2, vl); +} + +vuint32mf2_t test_vctz_v_u32mf2_tum(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) { + return __riscv_vctz_v_u32mf2_tum(vm, vd, vs2, vl); +} + +vuint32m1_t test_vctz_v_u32m1_tum(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vctz_v_u32m1_tum(vm, vd, vs2, vl); +} + +vuint32m2_t test_vctz_v_u32m2_tum(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vctz_v_u32m2_tum(vm, vd, vs2, vl); +} + +vuint32m4_t test_vctz_v_u32m4_tum(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vctz_v_u32m4_tum(vm, vd, vs2, vl); +} + +vuint32m8_t test_vctz_v_u32m8_tum(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { + return __riscv_vctz_v_u32m8_tum(vm, vd, vs2, vl); +} + +vuint64m1_t test_vctz_v_u64m1_tum(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, size_t vl) { + return __riscv_vctz_v_u64m1_tum(vm, vd, vs2, vl); +} + +vuint64m2_t test_vctz_v_u64m2_tum(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, size_t vl) { + return __riscv_vctz_v_u64m2_tum(vm, vd, vs2, vl); +} + +vuint64m4_t test_vctz_v_u64m4_tum(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, size_t vl) { + return __riscv_vctz_v_u64m4_tum(vm, vd, vs2, vl); +} + +vuint64m8_t test_vctz_v_u64m8_tum(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, size_t vl) { + return __riscv_vctz_v_u64m8_tum(vm, vd, vs2, vl); +} + +vuint8mf8_t test_vctz_v_u8mf8_tumu(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, size_t vl) { + return __riscv_vctz_v_u8mf8_tumu(vm, vd, vs2, vl); +} + +vuint8mf4_t test_vctz_v_u8mf4_tumu(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, size_t vl) { + return __riscv_vctz_v_u8mf4_tumu(vm, vd, vs2, vl); +} + +vuint8mf2_t test_vctz_v_u8mf2_tumu(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, size_t vl) { + return __riscv_vctz_v_u8mf2_tumu(vm, vd, vs2, vl); +} + +vuint8m1_t test_vctz_v_u8m1_tumu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, size_t vl) { + return __riscv_vctz_v_u8m1_tumu(vm, vd, vs2, vl); +} + +vuint8m2_t test_vctz_v_u8m2_tumu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, size_t vl) { + return __riscv_vctz_v_u8m2_tumu(vm, vd, vs2, vl); +} + +vuint8m4_t test_vctz_v_u8m4_tumu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, size_t vl) { + return __riscv_vctz_v_u8m4_tumu(vm, vd, vs2, vl); +} + +vuint8m8_t test_vctz_v_u8m8_tumu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, size_t vl) { + return __riscv_vctz_v_u8m8_tumu(vm, vd, vs2, vl); +} + +vuint16mf4_t test_vctz_v_u16mf4_tumu(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, size_t vl) { + return __riscv_vctz_v_u16mf4_tumu(vm, vd, vs2, vl); +} + +vuint16mf2_t test_vctz_v_u16mf2_tumu(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, size_t vl) { + return __riscv_vctz_v_u16mf2_tumu(vm, vd, vs2, vl); +} + +vuint16m1_t test_vctz_v_u16m1_tumu(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, size_t vl) { + return __riscv_vctz_v_u16m1_tumu(vm, vd, vs2, vl); +} + +vuint16m2_t test_vctz_v_u16m2_tumu(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, size_t vl) { + return __riscv_vctz_v_u16m2_tumu(vm, vd, vs2, vl); +} + +vuint16m4_t test_vctz_v_u16m4_tumu(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, size_t vl) { + 
return __riscv_vctz_v_u16m4_tumu(vm, vd, vs2, vl); +} + +vuint16m8_t test_vctz_v_u16m8_tumu(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, size_t vl) { + return __riscv_vctz_v_u16m8_tumu(vm, vd, vs2, vl); +} + +vuint32mf2_t test_vctz_v_u32mf2_tumu(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) { + return __riscv_vctz_v_u32mf2_tumu(vm, vd, vs2, vl); +} + +vuint32m1_t test_vctz_v_u32m1_tumu(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vctz_v_u32m1_tumu(vm, vd, vs2, vl); +} + +vuint32m2_t test_vctz_v_u32m2_tumu(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vctz_v_u32m2_tumu(vm, vd, vs2, vl); +} + +vuint32m4_t test_vctz_v_u32m4_tumu(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vctz_v_u32m4_tumu(vm, vd, vs2, vl); +} + +vuint32m8_t test_vctz_v_u32m8_tumu(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { + return __riscv_vctz_v_u32m8_tumu(vm, vd, vs2, vl); +} + +vuint64m1_t test_vctz_v_u64m1_tumu(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, size_t vl) { + return __riscv_vctz_v_u64m1_tumu(vm, vd, vs2, vl); +} + +vuint64m2_t test_vctz_v_u64m2_tumu(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, size_t vl) { + return __riscv_vctz_v_u64m2_tumu(vm, vd, vs2, vl); +} + +vuint64m4_t test_vctz_v_u64m4_tumu(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, size_t vl) { + return __riscv_vctz_v_u64m4_tumu(vm, vd, vs2, vl); +} + +vuint64m8_t test_vctz_v_u64m8_tumu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, size_t vl) { + return __riscv_vctz_v_u64m8_tumu(vm, vd, vs2, vl); +} + +vuint8mf8_t test_vctz_v_u8mf8_mu(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, size_t vl) { + return __riscv_vctz_v_u8mf8_mu(vm, vd, vs2, vl); +} + +vuint8mf4_t test_vctz_v_u8mf4_mu(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, size_t vl) { + return __riscv_vctz_v_u8mf4_mu(vm, vd, vs2, vl); +} + +vuint8mf2_t test_vctz_v_u8mf2_mu(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, size_t vl) { + return __riscv_vctz_v_u8mf2_mu(vm, vd, vs2, vl); +} + +vuint8m1_t test_vctz_v_u8m1_mu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, size_t vl) { + return __riscv_vctz_v_u8m1_mu(vm, vd, vs2, vl); +} + +vuint8m2_t test_vctz_v_u8m2_mu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, size_t vl) { + return __riscv_vctz_v_u8m2_mu(vm, vd, vs2, vl); +} + +vuint8m4_t test_vctz_v_u8m4_mu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, size_t vl) { + return __riscv_vctz_v_u8m4_mu(vm, vd, vs2, vl); +} + +vuint8m8_t test_vctz_v_u8m8_mu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, size_t vl) { + return __riscv_vctz_v_u8m8_mu(vm, vd, vs2, vl); +} + +vuint16mf4_t test_vctz_v_u16mf4_mu(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, size_t vl) { + return __riscv_vctz_v_u16mf4_mu(vm, vd, vs2, vl); +} + +vuint16mf2_t test_vctz_v_u16mf2_mu(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, size_t vl) { + return __riscv_vctz_v_u16mf2_mu(vm, vd, vs2, vl); +} + +vuint16m1_t test_vctz_v_u16m1_mu(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, size_t vl) { + return __riscv_vctz_v_u16m1_mu(vm, vd, vs2, vl); +} + +vuint16m2_t test_vctz_v_u16m2_mu(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, size_t vl) { + return __riscv_vctz_v_u16m2_mu(vm, vd, vs2, vl); +} + +vuint16m4_t test_vctz_v_u16m4_mu(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, size_t vl) { + return __riscv_vctz_v_u16m4_mu(vm, vd, vs2, vl); +} + +vuint16m8_t test_vctz_v_u16m8_mu(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, size_t vl) { + return __riscv_vctz_v_u16m8_mu(vm, vd, vs2, vl); +} + +vuint32mf2_t 
test_vctz_v_u32mf2_mu(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) { + return __riscv_vctz_v_u32mf2_mu(vm, vd, vs2, vl); +} + +vuint32m1_t test_vctz_v_u32m1_mu(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vctz_v_u32m1_mu(vm, vd, vs2, vl); +} + +vuint32m2_t test_vctz_v_u32m2_mu(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vctz_v_u32m2_mu(vm, vd, vs2, vl); +} + +vuint32m4_t test_vctz_v_u32m4_mu(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vctz_v_u32m4_mu(vm, vd, vs2, vl); +} + +vuint32m8_t test_vctz_v_u32m8_mu(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { + return __riscv_vctz_v_u32m8_mu(vm, vd, vs2, vl); +} + +vuint64m1_t test_vctz_v_u64m1_mu(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, size_t vl) { + return __riscv_vctz_v_u64m1_mu(vm, vd, vs2, vl); +} + +vuint64m2_t test_vctz_v_u64m2_mu(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, size_t vl) { + return __riscv_vctz_v_u64m2_mu(vm, vd, vs2, vl); +} + +vuint64m4_t test_vctz_v_u64m4_mu(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, size_t vl) { + return __riscv_vctz_v_u64m4_mu(vm, vd, vs2, vl); +} + +vuint64m8_t test_vctz_v_u64m8_mu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, size_t vl) { + return __riscv_vctz_v_u64m8_mu(vm, vd, vs2, vl); +} diff --git a/auto-generated/vector-crypto/policy_funcs/api-testing/vghsh.c b/auto-generated/vector-crypto/policy_funcs/api-testing/vghsh.c new file mode 100644 index 000000000..731050d9c --- /dev/null +++ b/auto-generated/vector-crypto/policy_funcs/api-testing/vghsh.c @@ -0,0 +1,22 @@ +#include +#include + +vuint32mf2_t test_vghsh_vv_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { + return __riscv_vghsh_vv_u32mf2_tu(vd, vs2, vs1, vl); +} + +vuint32m1_t test_vghsh_vv_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vghsh_vv_u32m1_tu(vd, vs2, vs1, vl); +} + +vuint32m2_t test_vghsh_vv_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { + return __riscv_vghsh_vv_u32m2_tu(vd, vs2, vs1, vl); +} + +vuint32m4_t test_vghsh_vv_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { + return __riscv_vghsh_vv_u32m4_tu(vd, vs2, vs1, vl); +} + +vuint32m8_t test_vghsh_vv_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) { + return __riscv_vghsh_vv_u32m8_tu(vd, vs2, vs1, vl); +} diff --git a/auto-generated/vector-crypto/policy_funcs/api-testing/vgmul.c b/auto-generated/vector-crypto/policy_funcs/api-testing/vgmul.c new file mode 100644 index 000000000..ed035adf4 --- /dev/null +++ b/auto-generated/vector-crypto/policy_funcs/api-testing/vgmul.c @@ -0,0 +1,22 @@ +#include +#include + +vuint32mf2_t test_vgmul_vv_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) { + return __riscv_vgmul_vv_u32mf2_tu(vd, vs2, vl); +} + +vuint32m1_t test_vgmul_vv_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vgmul_vv_u32m1_tu(vd, vs2, vl); +} + +vuint32m2_t test_vgmul_vv_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vgmul_vv_u32m2_tu(vd, vs2, vl); +} + +vuint32m4_t test_vgmul_vv_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vgmul_vv_u32m4_tu(vd, vs2, vl); +} + +vuint32m8_t test_vgmul_vv_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { + return __riscv_vgmul_vv_u32m8_tu(vd, vs2, vl); +} diff --git a/auto-generated/vector-crypto/policy_funcs/api-testing/vrev8.c 
b/auto-generated/vector-crypto/policy_funcs/api-testing/vrev8.c new file mode 100644 index 000000000..ef1976f3e --- /dev/null +++ b/auto-generated/vector-crypto/policy_funcs/api-testing/vrev8.c @@ -0,0 +1,354 @@ +#include +#include + +vuint8mf8_t test_vrev8_v_u8mf8_tu(vuint8mf8_t vd, vuint8mf8_t vs2, size_t vl) { + return __riscv_vrev8_v_u8mf8_tu(vd, vs2, vl); +} + +vuint8mf4_t test_vrev8_v_u8mf4_tu(vuint8mf4_t vd, vuint8mf4_t vs2, size_t vl) { + return __riscv_vrev8_v_u8mf4_tu(vd, vs2, vl); +} + +vuint8mf2_t test_vrev8_v_u8mf2_tu(vuint8mf2_t vd, vuint8mf2_t vs2, size_t vl) { + return __riscv_vrev8_v_u8mf2_tu(vd, vs2, vl); +} + +vuint8m1_t test_vrev8_v_u8m1_tu(vuint8m1_t vd, vuint8m1_t vs2, size_t vl) { + return __riscv_vrev8_v_u8m1_tu(vd, vs2, vl); +} + +vuint8m2_t test_vrev8_v_u8m2_tu(vuint8m2_t vd, vuint8m2_t vs2, size_t vl) { + return __riscv_vrev8_v_u8m2_tu(vd, vs2, vl); +} + +vuint8m4_t test_vrev8_v_u8m4_tu(vuint8m4_t vd, vuint8m4_t vs2, size_t vl) { + return __riscv_vrev8_v_u8m4_tu(vd, vs2, vl); +} + +vuint8m8_t test_vrev8_v_u8m8_tu(vuint8m8_t vd, vuint8m8_t vs2, size_t vl) { + return __riscv_vrev8_v_u8m8_tu(vd, vs2, vl); +} + +vuint16mf4_t test_vrev8_v_u16mf4_tu(vuint16mf4_t vd, vuint16mf4_t vs2, size_t vl) { + return __riscv_vrev8_v_u16mf4_tu(vd, vs2, vl); +} + +vuint16mf2_t test_vrev8_v_u16mf2_tu(vuint16mf2_t vd, vuint16mf2_t vs2, size_t vl) { + return __riscv_vrev8_v_u16mf2_tu(vd, vs2, vl); +} + +vuint16m1_t test_vrev8_v_u16m1_tu(vuint16m1_t vd, vuint16m1_t vs2, size_t vl) { + return __riscv_vrev8_v_u16m1_tu(vd, vs2, vl); +} + +vuint16m2_t test_vrev8_v_u16m2_tu(vuint16m2_t vd, vuint16m2_t vs2, size_t vl) { + return __riscv_vrev8_v_u16m2_tu(vd, vs2, vl); +} + +vuint16m4_t test_vrev8_v_u16m4_tu(vuint16m4_t vd, vuint16m4_t vs2, size_t vl) { + return __riscv_vrev8_v_u16m4_tu(vd, vs2, vl); +} + +vuint16m8_t test_vrev8_v_u16m8_tu(vuint16m8_t vd, vuint16m8_t vs2, size_t vl) { + return __riscv_vrev8_v_u16m8_tu(vd, vs2, vl); +} + +vuint32mf2_t test_vrev8_v_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) { + return __riscv_vrev8_v_u32mf2_tu(vd, vs2, vl); +} + +vuint32m1_t test_vrev8_v_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vrev8_v_u32m1_tu(vd, vs2, vl); +} + +vuint32m2_t test_vrev8_v_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vrev8_v_u32m2_tu(vd, vs2, vl); +} + +vuint32m4_t test_vrev8_v_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vrev8_v_u32m4_tu(vd, vs2, vl); +} + +vuint32m8_t test_vrev8_v_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { + return __riscv_vrev8_v_u32m8_tu(vd, vs2, vl); +} + +vuint64m1_t test_vrev8_v_u64m1_tu(vuint64m1_t vd, vuint64m1_t vs2, size_t vl) { + return __riscv_vrev8_v_u64m1_tu(vd, vs2, vl); +} + +vuint64m2_t test_vrev8_v_u64m2_tu(vuint64m2_t vd, vuint64m2_t vs2, size_t vl) { + return __riscv_vrev8_v_u64m2_tu(vd, vs2, vl); +} + +vuint64m4_t test_vrev8_v_u64m4_tu(vuint64m4_t vd, vuint64m4_t vs2, size_t vl) { + return __riscv_vrev8_v_u64m4_tu(vd, vs2, vl); +} + +vuint64m8_t test_vrev8_v_u64m8_tu(vuint64m8_t vd, vuint64m8_t vs2, size_t vl) { + return __riscv_vrev8_v_u64m8_tu(vd, vs2, vl); +} + +vuint8mf8_t test_vrev8_v_u8mf8_tum(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, size_t vl) { + return __riscv_vrev8_v_u8mf8_tum(vm, vd, vs2, vl); +} + +vuint8mf4_t test_vrev8_v_u8mf4_tum(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, size_t vl) { + return __riscv_vrev8_v_u8mf4_tum(vm, vd, vs2, vl); +} + +vuint8mf2_t test_vrev8_v_u8mf2_tum(vbool16_t vm, vuint8mf2_t vd, 
vuint8mf2_t vs2, size_t vl) { + return __riscv_vrev8_v_u8mf2_tum(vm, vd, vs2, vl); +} + +vuint8m1_t test_vrev8_v_u8m1_tum(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, size_t vl) { + return __riscv_vrev8_v_u8m1_tum(vm, vd, vs2, vl); +} + +vuint8m2_t test_vrev8_v_u8m2_tum(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, size_t vl) { + return __riscv_vrev8_v_u8m2_tum(vm, vd, vs2, vl); +} + +vuint8m4_t test_vrev8_v_u8m4_tum(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, size_t vl) { + return __riscv_vrev8_v_u8m4_tum(vm, vd, vs2, vl); +} + +vuint8m8_t test_vrev8_v_u8m8_tum(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, size_t vl) { + return __riscv_vrev8_v_u8m8_tum(vm, vd, vs2, vl); +} + +vuint16mf4_t test_vrev8_v_u16mf4_tum(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, size_t vl) { + return __riscv_vrev8_v_u16mf4_tum(vm, vd, vs2, vl); +} + +vuint16mf2_t test_vrev8_v_u16mf2_tum(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, size_t vl) { + return __riscv_vrev8_v_u16mf2_tum(vm, vd, vs2, vl); +} + +vuint16m1_t test_vrev8_v_u16m1_tum(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, size_t vl) { + return __riscv_vrev8_v_u16m1_tum(vm, vd, vs2, vl); +} + +vuint16m2_t test_vrev8_v_u16m2_tum(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, size_t vl) { + return __riscv_vrev8_v_u16m2_tum(vm, vd, vs2, vl); +} + +vuint16m4_t test_vrev8_v_u16m4_tum(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, size_t vl) { + return __riscv_vrev8_v_u16m4_tum(vm, vd, vs2, vl); +} + +vuint16m8_t test_vrev8_v_u16m8_tum(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, size_t vl) { + return __riscv_vrev8_v_u16m8_tum(vm, vd, vs2, vl); +} + +vuint32mf2_t test_vrev8_v_u32mf2_tum(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) { + return __riscv_vrev8_v_u32mf2_tum(vm, vd, vs2, vl); +} + +vuint32m1_t test_vrev8_v_u32m1_tum(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vrev8_v_u32m1_tum(vm, vd, vs2, vl); +} + +vuint32m2_t test_vrev8_v_u32m2_tum(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vrev8_v_u32m2_tum(vm, vd, vs2, vl); +} + +vuint32m4_t test_vrev8_v_u32m4_tum(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vrev8_v_u32m4_tum(vm, vd, vs2, vl); +} + +vuint32m8_t test_vrev8_v_u32m8_tum(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { + return __riscv_vrev8_v_u32m8_tum(vm, vd, vs2, vl); +} + +vuint64m1_t test_vrev8_v_u64m1_tum(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, size_t vl) { + return __riscv_vrev8_v_u64m1_tum(vm, vd, vs2, vl); +} + +vuint64m2_t test_vrev8_v_u64m2_tum(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, size_t vl) { + return __riscv_vrev8_v_u64m2_tum(vm, vd, vs2, vl); +} + +vuint64m4_t test_vrev8_v_u64m4_tum(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, size_t vl) { + return __riscv_vrev8_v_u64m4_tum(vm, vd, vs2, vl); +} + +vuint64m8_t test_vrev8_v_u64m8_tum(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, size_t vl) { + return __riscv_vrev8_v_u64m8_tum(vm, vd, vs2, vl); +} + +vuint8mf8_t test_vrev8_v_u8mf8_tumu(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, size_t vl) { + return __riscv_vrev8_v_u8mf8_tumu(vm, vd, vs2, vl); +} + +vuint8mf4_t test_vrev8_v_u8mf4_tumu(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, size_t vl) { + return __riscv_vrev8_v_u8mf4_tumu(vm, vd, vs2, vl); +} + +vuint8mf2_t test_vrev8_v_u8mf2_tumu(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, size_t vl) { + return __riscv_vrev8_v_u8mf2_tumu(vm, vd, vs2, vl); +} + +vuint8m1_t test_vrev8_v_u8m1_tumu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, size_t 
vl) { + return __riscv_vrev8_v_u8m1_tumu(vm, vd, vs2, vl); +} + +vuint8m2_t test_vrev8_v_u8m2_tumu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, size_t vl) { + return __riscv_vrev8_v_u8m2_tumu(vm, vd, vs2, vl); +} + +vuint8m4_t test_vrev8_v_u8m4_tumu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, size_t vl) { + return __riscv_vrev8_v_u8m4_tumu(vm, vd, vs2, vl); +} + +vuint8m8_t test_vrev8_v_u8m8_tumu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, size_t vl) { + return __riscv_vrev8_v_u8m8_tumu(vm, vd, vs2, vl); +} + +vuint16mf4_t test_vrev8_v_u16mf4_tumu(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, size_t vl) { + return __riscv_vrev8_v_u16mf4_tumu(vm, vd, vs2, vl); +} + +vuint16mf2_t test_vrev8_v_u16mf2_tumu(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, size_t vl) { + return __riscv_vrev8_v_u16mf2_tumu(vm, vd, vs2, vl); +} + +vuint16m1_t test_vrev8_v_u16m1_tumu(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, size_t vl) { + return __riscv_vrev8_v_u16m1_tumu(vm, vd, vs2, vl); +} + +vuint16m2_t test_vrev8_v_u16m2_tumu(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, size_t vl) { + return __riscv_vrev8_v_u16m2_tumu(vm, vd, vs2, vl); +} + +vuint16m4_t test_vrev8_v_u16m4_tumu(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, size_t vl) { + return __riscv_vrev8_v_u16m4_tumu(vm, vd, vs2, vl); +} + +vuint16m8_t test_vrev8_v_u16m8_tumu(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, size_t vl) { + return __riscv_vrev8_v_u16m8_tumu(vm, vd, vs2, vl); +} + +vuint32mf2_t test_vrev8_v_u32mf2_tumu(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) { + return __riscv_vrev8_v_u32mf2_tumu(vm, vd, vs2, vl); +} + +vuint32m1_t test_vrev8_v_u32m1_tumu(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vrev8_v_u32m1_tumu(vm, vd, vs2, vl); +} + +vuint32m2_t test_vrev8_v_u32m2_tumu(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vrev8_v_u32m2_tumu(vm, vd, vs2, vl); +} + +vuint32m4_t test_vrev8_v_u32m4_tumu(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vrev8_v_u32m4_tumu(vm, vd, vs2, vl); +} + +vuint32m8_t test_vrev8_v_u32m8_tumu(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { + return __riscv_vrev8_v_u32m8_tumu(vm, vd, vs2, vl); +} + +vuint64m1_t test_vrev8_v_u64m1_tumu(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, size_t vl) { + return __riscv_vrev8_v_u64m1_tumu(vm, vd, vs2, vl); +} + +vuint64m2_t test_vrev8_v_u64m2_tumu(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, size_t vl) { + return __riscv_vrev8_v_u64m2_tumu(vm, vd, vs2, vl); +} + +vuint64m4_t test_vrev8_v_u64m4_tumu(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, size_t vl) { + return __riscv_vrev8_v_u64m4_tumu(vm, vd, vs2, vl); +} + +vuint64m8_t test_vrev8_v_u64m8_tumu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, size_t vl) { + return __riscv_vrev8_v_u64m8_tumu(vm, vd, vs2, vl); +} + +vuint8mf8_t test_vrev8_v_u8mf8_mu(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, size_t vl) { + return __riscv_vrev8_v_u8mf8_mu(vm, vd, vs2, vl); +} + +vuint8mf4_t test_vrev8_v_u8mf4_mu(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, size_t vl) { + return __riscv_vrev8_v_u8mf4_mu(vm, vd, vs2, vl); +} + +vuint8mf2_t test_vrev8_v_u8mf2_mu(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, size_t vl) { + return __riscv_vrev8_v_u8mf2_mu(vm, vd, vs2, vl); +} + +vuint8m1_t test_vrev8_v_u8m1_mu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, size_t vl) { + return __riscv_vrev8_v_u8m1_mu(vm, vd, vs2, vl); +} + +vuint8m2_t test_vrev8_v_u8m2_mu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, size_t vl) { 
+ return __riscv_vrev8_v_u8m2_mu(vm, vd, vs2, vl); +} + +vuint8m4_t test_vrev8_v_u8m4_mu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, size_t vl) { + return __riscv_vrev8_v_u8m4_mu(vm, vd, vs2, vl); +} + +vuint8m8_t test_vrev8_v_u8m8_mu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, size_t vl) { + return __riscv_vrev8_v_u8m8_mu(vm, vd, vs2, vl); +} + +vuint16mf4_t test_vrev8_v_u16mf4_mu(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, size_t vl) { + return __riscv_vrev8_v_u16mf4_mu(vm, vd, vs2, vl); +} + +vuint16mf2_t test_vrev8_v_u16mf2_mu(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, size_t vl) { + return __riscv_vrev8_v_u16mf2_mu(vm, vd, vs2, vl); +} + +vuint16m1_t test_vrev8_v_u16m1_mu(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, size_t vl) { + return __riscv_vrev8_v_u16m1_mu(vm, vd, vs2, vl); +} + +vuint16m2_t test_vrev8_v_u16m2_mu(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, size_t vl) { + return __riscv_vrev8_v_u16m2_mu(vm, vd, vs2, vl); +} + +vuint16m4_t test_vrev8_v_u16m4_mu(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, size_t vl) { + return __riscv_vrev8_v_u16m4_mu(vm, vd, vs2, vl); +} + +vuint16m8_t test_vrev8_v_u16m8_mu(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, size_t vl) { + return __riscv_vrev8_v_u16m8_mu(vm, vd, vs2, vl); +} + +vuint32mf2_t test_vrev8_v_u32mf2_mu(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) { + return __riscv_vrev8_v_u32mf2_mu(vm, vd, vs2, vl); +} + +vuint32m1_t test_vrev8_v_u32m1_mu(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vrev8_v_u32m1_mu(vm, vd, vs2, vl); +} + +vuint32m2_t test_vrev8_v_u32m2_mu(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vrev8_v_u32m2_mu(vm, vd, vs2, vl); +} + +vuint32m4_t test_vrev8_v_u32m4_mu(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vrev8_v_u32m4_mu(vm, vd, vs2, vl); +} + +vuint32m8_t test_vrev8_v_u32m8_mu(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { + return __riscv_vrev8_v_u32m8_mu(vm, vd, vs2, vl); +} + +vuint64m1_t test_vrev8_v_u64m1_mu(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, size_t vl) { + return __riscv_vrev8_v_u64m1_mu(vm, vd, vs2, vl); +} + +vuint64m2_t test_vrev8_v_u64m2_mu(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, size_t vl) { + return __riscv_vrev8_v_u64m2_mu(vm, vd, vs2, vl); +} + +vuint64m4_t test_vrev8_v_u64m4_mu(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, size_t vl) { + return __riscv_vrev8_v_u64m4_mu(vm, vd, vs2, vl); +} + +vuint64m8_t test_vrev8_v_u64m8_mu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, size_t vl) { + return __riscv_vrev8_v_u64m8_mu(vm, vd, vs2, vl); +} diff --git a/auto-generated/vector-crypto/policy_funcs/api-testing/vrol.c b/auto-generated/vector-crypto/policy_funcs/api-testing/vrol.c new file mode 100644 index 000000000..d630a488c --- /dev/null +++ b/auto-generated/vector-crypto/policy_funcs/api-testing/vrol.c @@ -0,0 +1,706 @@ +#include +#include + +vuint8mf8_t test_vrol_vv_u8mf8_tu(vuint8mf8_t vd, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { + return __riscv_vrol_vv_u8mf8_tu(vd, vs2, vs1, vl); +} + +vuint8mf8_t test_vrol_vx_u8mf8_tu(vuint8mf8_t vd, vuint8mf8_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u8mf8_tu(vd, vs2, rs1, vl); +} + +vuint8mf4_t test_vrol_vv_u8mf4_tu(vuint8mf4_t vd, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { + return __riscv_vrol_vv_u8mf4_tu(vd, vs2, vs1, vl); +} + +vuint8mf4_t test_vrol_vx_u8mf4_tu(vuint8mf4_t vd, vuint8mf4_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u8mf4_tu(vd, vs2, rs1, vl); +} + 
+vuint8mf2_t test_vrol_vv_u8mf2_tu(vuint8mf2_t vd, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { + return __riscv_vrol_vv_u8mf2_tu(vd, vs2, vs1, vl); +} + +vuint8mf2_t test_vrol_vx_u8mf2_tu(vuint8mf2_t vd, vuint8mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u8mf2_tu(vd, vs2, rs1, vl); +} + +vuint8m1_t test_vrol_vv_u8m1_tu(vuint8m1_t vd, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vrol_vv_u8m1_tu(vd, vs2, vs1, vl); +} + +vuint8m1_t test_vrol_vx_u8m1_tu(vuint8m1_t vd, vuint8m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u8m1_tu(vd, vs2, rs1, vl); +} + +vuint8m2_t test_vrol_vv_u8m2_tu(vuint8m2_t vd, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) { + return __riscv_vrol_vv_u8m2_tu(vd, vs2, vs1, vl); +} + +vuint8m2_t test_vrol_vx_u8m2_tu(vuint8m2_t vd, vuint8m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u8m2_tu(vd, vs2, rs1, vl); +} + +vuint8m4_t test_vrol_vv_u8m4_tu(vuint8m4_t vd, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) { + return __riscv_vrol_vv_u8m4_tu(vd, vs2, vs1, vl); +} + +vuint8m4_t test_vrol_vx_u8m4_tu(vuint8m4_t vd, vuint8m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u8m4_tu(vd, vs2, rs1, vl); +} + +vuint8m8_t test_vrol_vv_u8m8_tu(vuint8m8_t vd, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) { + return __riscv_vrol_vv_u8m8_tu(vd, vs2, vs1, vl); +} + +vuint8m8_t test_vrol_vx_u8m8_tu(vuint8m8_t vd, vuint8m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u8m8_tu(vd, vs2, rs1, vl); +} + +vuint16mf4_t test_vrol_vv_u16mf4_tu(vuint16mf4_t vd, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { + return __riscv_vrol_vv_u16mf4_tu(vd, vs2, vs1, vl); +} + +vuint16mf4_t test_vrol_vx_u16mf4_tu(vuint16mf4_t vd, vuint16mf4_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u16mf4_tu(vd, vs2, rs1, vl); +} + +vuint16mf2_t test_vrol_vv_u16mf2_tu(vuint16mf2_t vd, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { + return __riscv_vrol_vv_u16mf2_tu(vd, vs2, vs1, vl); +} + +vuint16mf2_t test_vrol_vx_u16mf2_tu(vuint16mf2_t vd, vuint16mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u16mf2_tu(vd, vs2, rs1, vl); +} + +vuint16m1_t test_vrol_vv_u16m1_tu(vuint16m1_t vd, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vrol_vv_u16m1_tu(vd, vs2, vs1, vl); +} + +vuint16m1_t test_vrol_vx_u16m1_tu(vuint16m1_t vd, vuint16m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u16m1_tu(vd, vs2, rs1, vl); +} + +vuint16m2_t test_vrol_vv_u16m2_tu(vuint16m2_t vd, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { + return __riscv_vrol_vv_u16m2_tu(vd, vs2, vs1, vl); +} + +vuint16m2_t test_vrol_vx_u16m2_tu(vuint16m2_t vd, vuint16m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u16m2_tu(vd, vs2, rs1, vl); +} + +vuint16m4_t test_vrol_vv_u16m4_tu(vuint16m4_t vd, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { + return __riscv_vrol_vv_u16m4_tu(vd, vs2, vs1, vl); +} + +vuint16m4_t test_vrol_vx_u16m4_tu(vuint16m4_t vd, vuint16m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u16m4_tu(vd, vs2, rs1, vl); +} + +vuint16m8_t test_vrol_vv_u16m8_tu(vuint16m8_t vd, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) { + return __riscv_vrol_vv_u16m8_tu(vd, vs2, vs1, vl); +} + +vuint16m8_t test_vrol_vx_u16m8_tu(vuint16m8_t vd, vuint16m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u16m8_tu(vd, vs2, rs1, vl); +} + +vuint32mf2_t test_vrol_vv_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { + return __riscv_vrol_vv_u32mf2_tu(vd, vs2, vs1, vl); +} + +vuint32mf2_t 
test_vrol_vx_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u32mf2_tu(vd, vs2, rs1, vl); +} + +vuint32m1_t test_vrol_vv_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vrol_vv_u32m1_tu(vd, vs2, vs1, vl); +} + +vuint32m1_t test_vrol_vx_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u32m1_tu(vd, vs2, rs1, vl); +} + +vuint32m2_t test_vrol_vv_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { + return __riscv_vrol_vv_u32m2_tu(vd, vs2, vs1, vl); +} + +vuint32m2_t test_vrol_vx_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u32m2_tu(vd, vs2, rs1, vl); +} + +vuint32m4_t test_vrol_vv_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { + return __riscv_vrol_vv_u32m4_tu(vd, vs2, vs1, vl); +} + +vuint32m4_t test_vrol_vx_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u32m4_tu(vd, vs2, rs1, vl); +} + +vuint32m8_t test_vrol_vv_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) { + return __riscv_vrol_vv_u32m8_tu(vd, vs2, vs1, vl); +} + +vuint32m8_t test_vrol_vx_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u32m8_tu(vd, vs2, rs1, vl); +} + +vuint64m1_t test_vrol_vv_u64m1_tu(vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vrol_vv_u64m1_tu(vd, vs2, vs1, vl); +} + +vuint64m1_t test_vrol_vx_u64m1_tu(vuint64m1_t vd, vuint64m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u64m1_tu(vd, vs2, rs1, vl); +} + +vuint64m2_t test_vrol_vv_u64m2_tu(vuint64m2_t vd, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { + return __riscv_vrol_vv_u64m2_tu(vd, vs2, vs1, vl); +} + +vuint64m2_t test_vrol_vx_u64m2_tu(vuint64m2_t vd, vuint64m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u64m2_tu(vd, vs2, rs1, vl); +} + +vuint64m4_t test_vrol_vv_u64m4_tu(vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { + return __riscv_vrol_vv_u64m4_tu(vd, vs2, vs1, vl); +} + +vuint64m4_t test_vrol_vx_u64m4_tu(vuint64m4_t vd, vuint64m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u64m4_tu(vd, vs2, rs1, vl); +} + +vuint64m8_t test_vrol_vv_u64m8_tu(vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { + return __riscv_vrol_vv_u64m8_tu(vd, vs2, vs1, vl); +} + +vuint64m8_t test_vrol_vx_u64m8_tu(vuint64m8_t vd, vuint64m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u64m8_tu(vd, vs2, rs1, vl); +} + +vuint8mf8_t test_vrol_vv_u8mf8_tum(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { + return __riscv_vrol_vv_u8mf8_tum(vm, vd, vs2, vs1, vl); +} + +vuint8mf8_t test_vrol_vx_u8mf8_tum(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u8mf8_tum(vm, vd, vs2, rs1, vl); +} + +vuint8mf4_t test_vrol_vv_u8mf4_tum(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { + return __riscv_vrol_vv_u8mf4_tum(vm, vd, vs2, vs1, vl); +} + +vuint8mf4_t test_vrol_vx_u8mf4_tum(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u8mf4_tum(vm, vd, vs2, rs1, vl); +} + +vuint8mf2_t test_vrol_vv_u8mf2_tum(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { + return __riscv_vrol_vv_u8mf2_tum(vm, vd, vs2, vs1, vl); +} + +vuint8mf2_t test_vrol_vx_u8mf2_tum(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, 
size_t rs1, size_t vl) { + return __riscv_vrol_vx_u8mf2_tum(vm, vd, vs2, rs1, vl); +} + +vuint8m1_t test_vrol_vv_u8m1_tum(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vrol_vv_u8m1_tum(vm, vd, vs2, vs1, vl); +} + +vuint8m1_t test_vrol_vx_u8m1_tum(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u8m1_tum(vm, vd, vs2, rs1, vl); +} + +vuint8m2_t test_vrol_vv_u8m2_tum(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) { + return __riscv_vrol_vv_u8m2_tum(vm, vd, vs2, vs1, vl); +} + +vuint8m2_t test_vrol_vx_u8m2_tum(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u8m2_tum(vm, vd, vs2, rs1, vl); +} + +vuint8m4_t test_vrol_vv_u8m4_tum(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) { + return __riscv_vrol_vv_u8m4_tum(vm, vd, vs2, vs1, vl); +} + +vuint8m4_t test_vrol_vx_u8m4_tum(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u8m4_tum(vm, vd, vs2, rs1, vl); +} + +vuint8m8_t test_vrol_vv_u8m8_tum(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) { + return __riscv_vrol_vv_u8m8_tum(vm, vd, vs2, vs1, vl); +} + +vuint8m8_t test_vrol_vx_u8m8_tum(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u8m8_tum(vm, vd, vs2, rs1, vl); +} + +vuint16mf4_t test_vrol_vv_u16mf4_tum(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { + return __riscv_vrol_vv_u16mf4_tum(vm, vd, vs2, vs1, vl); +} + +vuint16mf4_t test_vrol_vx_u16mf4_tum(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u16mf4_tum(vm, vd, vs2, rs1, vl); +} + +vuint16mf2_t test_vrol_vv_u16mf2_tum(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { + return __riscv_vrol_vv_u16mf2_tum(vm, vd, vs2, vs1, vl); +} + +vuint16mf2_t test_vrol_vx_u16mf2_tum(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u16mf2_tum(vm, vd, vs2, rs1, vl); +} + +vuint16m1_t test_vrol_vv_u16m1_tum(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vrol_vv_u16m1_tum(vm, vd, vs2, vs1, vl); +} + +vuint16m1_t test_vrol_vx_u16m1_tum(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u16m1_tum(vm, vd, vs2, rs1, vl); +} + +vuint16m2_t test_vrol_vv_u16m2_tum(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { + return __riscv_vrol_vv_u16m2_tum(vm, vd, vs2, vs1, vl); +} + +vuint16m2_t test_vrol_vx_u16m2_tum(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u16m2_tum(vm, vd, vs2, rs1, vl); +} + +vuint16m4_t test_vrol_vv_u16m4_tum(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { + return __riscv_vrol_vv_u16m4_tum(vm, vd, vs2, vs1, vl); +} + +vuint16m4_t test_vrol_vx_u16m4_tum(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u16m4_tum(vm, vd, vs2, rs1, vl); +} + +vuint16m8_t test_vrol_vv_u16m8_tum(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) { + return __riscv_vrol_vv_u16m8_tum(vm, vd, vs2, vs1, vl); +} + +vuint16m8_t test_vrol_vx_u16m8_tum(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u16m8_tum(vm, vd, vs2, rs1, vl); +} + +vuint32mf2_t 
test_vrol_vv_u32mf2_tum(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { + return __riscv_vrol_vv_u32mf2_tum(vm, vd, vs2, vs1, vl); +} + +vuint32mf2_t test_vrol_vx_u32mf2_tum(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u32mf2_tum(vm, vd, vs2, rs1, vl); +} + +vuint32m1_t test_vrol_vv_u32m1_tum(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vrol_vv_u32m1_tum(vm, vd, vs2, vs1, vl); +} + +vuint32m1_t test_vrol_vx_u32m1_tum(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u32m1_tum(vm, vd, vs2, rs1, vl); +} + +vuint32m2_t test_vrol_vv_u32m2_tum(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { + return __riscv_vrol_vv_u32m2_tum(vm, vd, vs2, vs1, vl); +} + +vuint32m2_t test_vrol_vx_u32m2_tum(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u32m2_tum(vm, vd, vs2, rs1, vl); +} + +vuint32m4_t test_vrol_vv_u32m4_tum(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { + return __riscv_vrol_vv_u32m4_tum(vm, vd, vs2, vs1, vl); +} + +vuint32m4_t test_vrol_vx_u32m4_tum(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u32m4_tum(vm, vd, vs2, rs1, vl); +} + +vuint32m8_t test_vrol_vv_u32m8_tum(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) { + return __riscv_vrol_vv_u32m8_tum(vm, vd, vs2, vs1, vl); +} + +vuint32m8_t test_vrol_vx_u32m8_tum(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u32m8_tum(vm, vd, vs2, rs1, vl); +} + +vuint64m1_t test_vrol_vv_u64m1_tum(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vrol_vv_u64m1_tum(vm, vd, vs2, vs1, vl); +} + +vuint64m1_t test_vrol_vx_u64m1_tum(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u64m1_tum(vm, vd, vs2, rs1, vl); +} + +vuint64m2_t test_vrol_vv_u64m2_tum(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { + return __riscv_vrol_vv_u64m2_tum(vm, vd, vs2, vs1, vl); +} + +vuint64m2_t test_vrol_vx_u64m2_tum(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u64m2_tum(vm, vd, vs2, rs1, vl); +} + +vuint64m4_t test_vrol_vv_u64m4_tum(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { + return __riscv_vrol_vv_u64m4_tum(vm, vd, vs2, vs1, vl); +} + +vuint64m4_t test_vrol_vx_u64m4_tum(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u64m4_tum(vm, vd, vs2, rs1, vl); +} + +vuint64m8_t test_vrol_vv_u64m8_tum(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { + return __riscv_vrol_vv_u64m8_tum(vm, vd, vs2, vs1, vl); +} + +vuint64m8_t test_vrol_vx_u64m8_tum(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u64m8_tum(vm, vd, vs2, rs1, vl); +} + +vuint8mf8_t test_vrol_vv_u8mf8_tumu(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { + return __riscv_vrol_vv_u8mf8_tumu(vm, vd, vs2, vs1, vl); +} + +vuint8mf8_t test_vrol_vx_u8mf8_tumu(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u8mf8_tumu(vm, vd, vs2, rs1, vl); +} + +vuint8mf4_t test_vrol_vv_u8mf4_tumu(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, 
vuint8mf4_t vs1, size_t vl) { + return __riscv_vrol_vv_u8mf4_tumu(vm, vd, vs2, vs1, vl); +} + +vuint8mf4_t test_vrol_vx_u8mf4_tumu(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u8mf4_tumu(vm, vd, vs2, rs1, vl); +} + +vuint8mf2_t test_vrol_vv_u8mf2_tumu(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { + return __riscv_vrol_vv_u8mf2_tumu(vm, vd, vs2, vs1, vl); +} + +vuint8mf2_t test_vrol_vx_u8mf2_tumu(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u8mf2_tumu(vm, vd, vs2, rs1, vl); +} + +vuint8m1_t test_vrol_vv_u8m1_tumu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vrol_vv_u8m1_tumu(vm, vd, vs2, vs1, vl); +} + +vuint8m1_t test_vrol_vx_u8m1_tumu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u8m1_tumu(vm, vd, vs2, rs1, vl); +} + +vuint8m2_t test_vrol_vv_u8m2_tumu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) { + return __riscv_vrol_vv_u8m2_tumu(vm, vd, vs2, vs1, vl); +} + +vuint8m2_t test_vrol_vx_u8m2_tumu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u8m2_tumu(vm, vd, vs2, rs1, vl); +} + +vuint8m4_t test_vrol_vv_u8m4_tumu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) { + return __riscv_vrol_vv_u8m4_tumu(vm, vd, vs2, vs1, vl); +} + +vuint8m4_t test_vrol_vx_u8m4_tumu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u8m4_tumu(vm, vd, vs2, rs1, vl); +} + +vuint8m8_t test_vrol_vv_u8m8_tumu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) { + return __riscv_vrol_vv_u8m8_tumu(vm, vd, vs2, vs1, vl); +} + +vuint8m8_t test_vrol_vx_u8m8_tumu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u8m8_tumu(vm, vd, vs2, rs1, vl); +} + +vuint16mf4_t test_vrol_vv_u16mf4_tumu(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { + return __riscv_vrol_vv_u16mf4_tumu(vm, vd, vs2, vs1, vl); +} + +vuint16mf4_t test_vrol_vx_u16mf4_tumu(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u16mf4_tumu(vm, vd, vs2, rs1, vl); +} + +vuint16mf2_t test_vrol_vv_u16mf2_tumu(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { + return __riscv_vrol_vv_u16mf2_tumu(vm, vd, vs2, vs1, vl); +} + +vuint16mf2_t test_vrol_vx_u16mf2_tumu(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u16mf2_tumu(vm, vd, vs2, rs1, vl); +} + +vuint16m1_t test_vrol_vv_u16m1_tumu(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vrol_vv_u16m1_tumu(vm, vd, vs2, vs1, vl); +} + +vuint16m1_t test_vrol_vx_u16m1_tumu(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u16m1_tumu(vm, vd, vs2, rs1, vl); +} + +vuint16m2_t test_vrol_vv_u16m2_tumu(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { + return __riscv_vrol_vv_u16m2_tumu(vm, vd, vs2, vs1, vl); +} + +vuint16m2_t test_vrol_vx_u16m2_tumu(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u16m2_tumu(vm, vd, vs2, rs1, vl); +} + +vuint16m4_t test_vrol_vv_u16m4_tumu(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { + return __riscv_vrol_vv_u16m4_tumu(vm, vd, 
vs2, vs1, vl); +} + +vuint16m4_t test_vrol_vx_u16m4_tumu(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u16m4_tumu(vm, vd, vs2, rs1, vl); +} + +vuint16m8_t test_vrol_vv_u16m8_tumu(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) { + return __riscv_vrol_vv_u16m8_tumu(vm, vd, vs2, vs1, vl); +} + +vuint16m8_t test_vrol_vx_u16m8_tumu(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u16m8_tumu(vm, vd, vs2, rs1, vl); +} + +vuint32mf2_t test_vrol_vv_u32mf2_tumu(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { + return __riscv_vrol_vv_u32mf2_tumu(vm, vd, vs2, vs1, vl); +} + +vuint32mf2_t test_vrol_vx_u32mf2_tumu(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u32mf2_tumu(vm, vd, vs2, rs1, vl); +} + +vuint32m1_t test_vrol_vv_u32m1_tumu(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vrol_vv_u32m1_tumu(vm, vd, vs2, vs1, vl); +} + +vuint32m1_t test_vrol_vx_u32m1_tumu(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u32m1_tumu(vm, vd, vs2, rs1, vl); +} + +vuint32m2_t test_vrol_vv_u32m2_tumu(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { + return __riscv_vrol_vv_u32m2_tumu(vm, vd, vs2, vs1, vl); +} + +vuint32m2_t test_vrol_vx_u32m2_tumu(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u32m2_tumu(vm, vd, vs2, rs1, vl); +} + +vuint32m4_t test_vrol_vv_u32m4_tumu(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { + return __riscv_vrol_vv_u32m4_tumu(vm, vd, vs2, vs1, vl); +} + +vuint32m4_t test_vrol_vx_u32m4_tumu(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u32m4_tumu(vm, vd, vs2, rs1, vl); +} + +vuint32m8_t test_vrol_vv_u32m8_tumu(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) { + return __riscv_vrol_vv_u32m8_tumu(vm, vd, vs2, vs1, vl); +} + +vuint32m8_t test_vrol_vx_u32m8_tumu(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u32m8_tumu(vm, vd, vs2, rs1, vl); +} + +vuint64m1_t test_vrol_vv_u64m1_tumu(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vrol_vv_u64m1_tumu(vm, vd, vs2, vs1, vl); +} + +vuint64m1_t test_vrol_vx_u64m1_tumu(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u64m1_tumu(vm, vd, vs2, rs1, vl); +} + +vuint64m2_t test_vrol_vv_u64m2_tumu(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { + return __riscv_vrol_vv_u64m2_tumu(vm, vd, vs2, vs1, vl); +} + +vuint64m2_t test_vrol_vx_u64m2_tumu(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u64m2_tumu(vm, vd, vs2, rs1, vl); +} + +vuint64m4_t test_vrol_vv_u64m4_tumu(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { + return __riscv_vrol_vv_u64m4_tumu(vm, vd, vs2, vs1, vl); +} + +vuint64m4_t test_vrol_vx_u64m4_tumu(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u64m4_tumu(vm, vd, vs2, rs1, vl); +} + +vuint64m8_t test_vrol_vv_u64m8_tumu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { + return __riscv_vrol_vv_u64m8_tumu(vm, vd, vs2, vs1, vl); +} + +vuint64m8_t 
test_vrol_vx_u64m8_tumu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u64m8_tumu(vm, vd, vs2, rs1, vl); +} + +vuint8mf8_t test_vrol_vv_u8mf8_mu(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { + return __riscv_vrol_vv_u8mf8_mu(vm, vd, vs2, vs1, vl); +} + +vuint8mf8_t test_vrol_vx_u8mf8_mu(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u8mf8_mu(vm, vd, vs2, rs1, vl); +} + +vuint8mf4_t test_vrol_vv_u8mf4_mu(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { + return __riscv_vrol_vv_u8mf4_mu(vm, vd, vs2, vs1, vl); +} + +vuint8mf4_t test_vrol_vx_u8mf4_mu(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u8mf4_mu(vm, vd, vs2, rs1, vl); +} + +vuint8mf2_t test_vrol_vv_u8mf2_mu(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { + return __riscv_vrol_vv_u8mf2_mu(vm, vd, vs2, vs1, vl); +} + +vuint8mf2_t test_vrol_vx_u8mf2_mu(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u8mf2_mu(vm, vd, vs2, rs1, vl); +} + +vuint8m1_t test_vrol_vv_u8m1_mu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vrol_vv_u8m1_mu(vm, vd, vs2, vs1, vl); +} + +vuint8m1_t test_vrol_vx_u8m1_mu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u8m1_mu(vm, vd, vs2, rs1, vl); +} + +vuint8m2_t test_vrol_vv_u8m2_mu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) { + return __riscv_vrol_vv_u8m2_mu(vm, vd, vs2, vs1, vl); +} + +vuint8m2_t test_vrol_vx_u8m2_mu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u8m2_mu(vm, vd, vs2, rs1, vl); +} + +vuint8m4_t test_vrol_vv_u8m4_mu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) { + return __riscv_vrol_vv_u8m4_mu(vm, vd, vs2, vs1, vl); +} + +vuint8m4_t test_vrol_vx_u8m4_mu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u8m4_mu(vm, vd, vs2, rs1, vl); +} + +vuint8m8_t test_vrol_vv_u8m8_mu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) { + return __riscv_vrol_vv_u8m8_mu(vm, vd, vs2, vs1, vl); +} + +vuint8m8_t test_vrol_vx_u8m8_mu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u8m8_mu(vm, vd, vs2, rs1, vl); +} + +vuint16mf4_t test_vrol_vv_u16mf4_mu(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { + return __riscv_vrol_vv_u16mf4_mu(vm, vd, vs2, vs1, vl); +} + +vuint16mf4_t test_vrol_vx_u16mf4_mu(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u16mf4_mu(vm, vd, vs2, rs1, vl); +} + +vuint16mf2_t test_vrol_vv_u16mf2_mu(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { + return __riscv_vrol_vv_u16mf2_mu(vm, vd, vs2, vs1, vl); +} + +vuint16mf2_t test_vrol_vx_u16mf2_mu(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u16mf2_mu(vm, vd, vs2, rs1, vl); +} + +vuint16m1_t test_vrol_vv_u16m1_mu(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vrol_vv_u16m1_mu(vm, vd, vs2, vs1, vl); +} + +vuint16m1_t test_vrol_vx_u16m1_mu(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u16m1_mu(vm, vd, vs2, rs1, vl); 
+} + +vuint16m2_t test_vrol_vv_u16m2_mu(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { + return __riscv_vrol_vv_u16m2_mu(vm, vd, vs2, vs1, vl); +} + +vuint16m2_t test_vrol_vx_u16m2_mu(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u16m2_mu(vm, vd, vs2, rs1, vl); +} + +vuint16m4_t test_vrol_vv_u16m4_mu(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { + return __riscv_vrol_vv_u16m4_mu(vm, vd, vs2, vs1, vl); +} + +vuint16m4_t test_vrol_vx_u16m4_mu(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u16m4_mu(vm, vd, vs2, rs1, vl); +} + +vuint16m8_t test_vrol_vv_u16m8_mu(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) { + return __riscv_vrol_vv_u16m8_mu(vm, vd, vs2, vs1, vl); +} + +vuint16m8_t test_vrol_vx_u16m8_mu(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u16m8_mu(vm, vd, vs2, rs1, vl); +} + +vuint32mf2_t test_vrol_vv_u32mf2_mu(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { + return __riscv_vrol_vv_u32mf2_mu(vm, vd, vs2, vs1, vl); +} + +vuint32mf2_t test_vrol_vx_u32mf2_mu(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u32mf2_mu(vm, vd, vs2, rs1, vl); +} + +vuint32m1_t test_vrol_vv_u32m1_mu(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vrol_vv_u32m1_mu(vm, vd, vs2, vs1, vl); +} + +vuint32m1_t test_vrol_vx_u32m1_mu(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u32m1_mu(vm, vd, vs2, rs1, vl); +} + +vuint32m2_t test_vrol_vv_u32m2_mu(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { + return __riscv_vrol_vv_u32m2_mu(vm, vd, vs2, vs1, vl); +} + +vuint32m2_t test_vrol_vx_u32m2_mu(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u32m2_mu(vm, vd, vs2, rs1, vl); +} + +vuint32m4_t test_vrol_vv_u32m4_mu(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { + return __riscv_vrol_vv_u32m4_mu(vm, vd, vs2, vs1, vl); +} + +vuint32m4_t test_vrol_vx_u32m4_mu(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u32m4_mu(vm, vd, vs2, rs1, vl); +} + +vuint32m8_t test_vrol_vv_u32m8_mu(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) { + return __riscv_vrol_vv_u32m8_mu(vm, vd, vs2, vs1, vl); +} + +vuint32m8_t test_vrol_vx_u32m8_mu(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u32m8_mu(vm, vd, vs2, rs1, vl); +} + +vuint64m1_t test_vrol_vv_u64m1_mu(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vrol_vv_u64m1_mu(vm, vd, vs2, vs1, vl); +} + +vuint64m1_t test_vrol_vx_u64m1_mu(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u64m1_mu(vm, vd, vs2, rs1, vl); +} + +vuint64m2_t test_vrol_vv_u64m2_mu(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { + return __riscv_vrol_vv_u64m2_mu(vm, vd, vs2, vs1, vl); +} + +vuint64m2_t test_vrol_vx_u64m2_mu(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u64m2_mu(vm, vd, vs2, rs1, vl); +} + +vuint64m4_t test_vrol_vv_u64m4_mu(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { + 
return __riscv_vrol_vv_u64m4_mu(vm, vd, vs2, vs1, vl); +} + +vuint64m4_t test_vrol_vx_u64m4_mu(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u64m4_mu(vm, vd, vs2, rs1, vl); +} + +vuint64m8_t test_vrol_vv_u64m8_mu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { + return __riscv_vrol_vv_u64m8_mu(vm, vd, vs2, vs1, vl); +} + +vuint64m8_t test_vrol_vx_u64m8_mu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u64m8_mu(vm, vd, vs2, rs1, vl); +} diff --git a/auto-generated/vector-crypto/policy_funcs/api-testing/vror.c b/auto-generated/vector-crypto/policy_funcs/api-testing/vror.c new file mode 100644 index 000000000..f62f3eb6e --- /dev/null +++ b/auto-generated/vector-crypto/policy_funcs/api-testing/vror.c @@ -0,0 +1,706 @@ +#include +#include + +vuint8mf8_t test_vror_vv_u8mf8_tu(vuint8mf8_t vd, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { + return __riscv_vror_vv_u8mf8_tu(vd, vs2, vs1, vl); +} + +vuint8mf8_t test_vror_vx_u8mf8_tu(vuint8mf8_t vd, vuint8mf8_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u8mf8_tu(vd, vs2, rs1, vl); +} + +vuint8mf4_t test_vror_vv_u8mf4_tu(vuint8mf4_t vd, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { + return __riscv_vror_vv_u8mf4_tu(vd, vs2, vs1, vl); +} + +vuint8mf4_t test_vror_vx_u8mf4_tu(vuint8mf4_t vd, vuint8mf4_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u8mf4_tu(vd, vs2, rs1, vl); +} + +vuint8mf2_t test_vror_vv_u8mf2_tu(vuint8mf2_t vd, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { + return __riscv_vror_vv_u8mf2_tu(vd, vs2, vs1, vl); +} + +vuint8mf2_t test_vror_vx_u8mf2_tu(vuint8mf2_t vd, vuint8mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u8mf2_tu(vd, vs2, rs1, vl); +} + +vuint8m1_t test_vror_vv_u8m1_tu(vuint8m1_t vd, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vror_vv_u8m1_tu(vd, vs2, vs1, vl); +} + +vuint8m1_t test_vror_vx_u8m1_tu(vuint8m1_t vd, vuint8m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u8m1_tu(vd, vs2, rs1, vl); +} + +vuint8m2_t test_vror_vv_u8m2_tu(vuint8m2_t vd, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) { + return __riscv_vror_vv_u8m2_tu(vd, vs2, vs1, vl); +} + +vuint8m2_t test_vror_vx_u8m2_tu(vuint8m2_t vd, vuint8m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u8m2_tu(vd, vs2, rs1, vl); +} + +vuint8m4_t test_vror_vv_u8m4_tu(vuint8m4_t vd, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) { + return __riscv_vror_vv_u8m4_tu(vd, vs2, vs1, vl); +} + +vuint8m4_t test_vror_vx_u8m4_tu(vuint8m4_t vd, vuint8m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u8m4_tu(vd, vs2, rs1, vl); +} + +vuint8m8_t test_vror_vv_u8m8_tu(vuint8m8_t vd, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) { + return __riscv_vror_vv_u8m8_tu(vd, vs2, vs1, vl); +} + +vuint8m8_t test_vror_vx_u8m8_tu(vuint8m8_t vd, vuint8m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u8m8_tu(vd, vs2, rs1, vl); +} + +vuint16mf4_t test_vror_vv_u16mf4_tu(vuint16mf4_t vd, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { + return __riscv_vror_vv_u16mf4_tu(vd, vs2, vs1, vl); +} + +vuint16mf4_t test_vror_vx_u16mf4_tu(vuint16mf4_t vd, vuint16mf4_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u16mf4_tu(vd, vs2, rs1, vl); +} + +vuint16mf2_t test_vror_vv_u16mf2_tu(vuint16mf2_t vd, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { + return __riscv_vror_vv_u16mf2_tu(vd, vs2, vs1, vl); +} + +vuint16mf2_t test_vror_vx_u16mf2_tu(vuint16mf2_t vd, vuint16mf2_t vs2, size_t rs1, size_t vl) 
{ + return __riscv_vror_vx_u16mf2_tu(vd, vs2, rs1, vl); +} + +vuint16m1_t test_vror_vv_u16m1_tu(vuint16m1_t vd, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vror_vv_u16m1_tu(vd, vs2, vs1, vl); +} + +vuint16m1_t test_vror_vx_u16m1_tu(vuint16m1_t vd, vuint16m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u16m1_tu(vd, vs2, rs1, vl); +} + +vuint16m2_t test_vror_vv_u16m2_tu(vuint16m2_t vd, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { + return __riscv_vror_vv_u16m2_tu(vd, vs2, vs1, vl); +} + +vuint16m2_t test_vror_vx_u16m2_tu(vuint16m2_t vd, vuint16m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u16m2_tu(vd, vs2, rs1, vl); +} + +vuint16m4_t test_vror_vv_u16m4_tu(vuint16m4_t vd, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { + return __riscv_vror_vv_u16m4_tu(vd, vs2, vs1, vl); +} + +vuint16m4_t test_vror_vx_u16m4_tu(vuint16m4_t vd, vuint16m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u16m4_tu(vd, vs2, rs1, vl); +} + +vuint16m8_t test_vror_vv_u16m8_tu(vuint16m8_t vd, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) { + return __riscv_vror_vv_u16m8_tu(vd, vs2, vs1, vl); +} + +vuint16m8_t test_vror_vx_u16m8_tu(vuint16m8_t vd, vuint16m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u16m8_tu(vd, vs2, rs1, vl); +} + +vuint32mf2_t test_vror_vv_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { + return __riscv_vror_vv_u32mf2_tu(vd, vs2, vs1, vl); +} + +vuint32mf2_t test_vror_vx_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u32mf2_tu(vd, vs2, rs1, vl); +} + +vuint32m1_t test_vror_vv_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vror_vv_u32m1_tu(vd, vs2, vs1, vl); +} + +vuint32m1_t test_vror_vx_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u32m1_tu(vd, vs2, rs1, vl); +} + +vuint32m2_t test_vror_vv_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { + return __riscv_vror_vv_u32m2_tu(vd, vs2, vs1, vl); +} + +vuint32m2_t test_vror_vx_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u32m2_tu(vd, vs2, rs1, vl); +} + +vuint32m4_t test_vror_vv_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { + return __riscv_vror_vv_u32m4_tu(vd, vs2, vs1, vl); +} + +vuint32m4_t test_vror_vx_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u32m4_tu(vd, vs2, rs1, vl); +} + +vuint32m8_t test_vror_vv_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) { + return __riscv_vror_vv_u32m8_tu(vd, vs2, vs1, vl); +} + +vuint32m8_t test_vror_vx_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u32m8_tu(vd, vs2, rs1, vl); +} + +vuint64m1_t test_vror_vv_u64m1_tu(vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vror_vv_u64m1_tu(vd, vs2, vs1, vl); +} + +vuint64m1_t test_vror_vx_u64m1_tu(vuint64m1_t vd, vuint64m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u64m1_tu(vd, vs2, rs1, vl); +} + +vuint64m2_t test_vror_vv_u64m2_tu(vuint64m2_t vd, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { + return __riscv_vror_vv_u64m2_tu(vd, vs2, vs1, vl); +} + +vuint64m2_t test_vror_vx_u64m2_tu(vuint64m2_t vd, vuint64m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u64m2_tu(vd, vs2, rs1, vl); +} + +vuint64m4_t test_vror_vv_u64m4_tu(vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { + 
return __riscv_vror_vv_u64m4_tu(vd, vs2, vs1, vl); +} + +vuint64m4_t test_vror_vx_u64m4_tu(vuint64m4_t vd, vuint64m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u64m4_tu(vd, vs2, rs1, vl); +} + +vuint64m8_t test_vror_vv_u64m8_tu(vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { + return __riscv_vror_vv_u64m8_tu(vd, vs2, vs1, vl); +} + +vuint64m8_t test_vror_vx_u64m8_tu(vuint64m8_t vd, vuint64m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u64m8_tu(vd, vs2, rs1, vl); +} + +vuint8mf8_t test_vror_vv_u8mf8_tum(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { + return __riscv_vror_vv_u8mf8_tum(vm, vd, vs2, vs1, vl); +} + +vuint8mf8_t test_vror_vx_u8mf8_tum(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u8mf8_tum(vm, vd, vs2, rs1, vl); +} + +vuint8mf4_t test_vror_vv_u8mf4_tum(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { + return __riscv_vror_vv_u8mf4_tum(vm, vd, vs2, vs1, vl); +} + +vuint8mf4_t test_vror_vx_u8mf4_tum(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u8mf4_tum(vm, vd, vs2, rs1, vl); +} + +vuint8mf2_t test_vror_vv_u8mf2_tum(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { + return __riscv_vror_vv_u8mf2_tum(vm, vd, vs2, vs1, vl); +} + +vuint8mf2_t test_vror_vx_u8mf2_tum(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u8mf2_tum(vm, vd, vs2, rs1, vl); +} + +vuint8m1_t test_vror_vv_u8m1_tum(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vror_vv_u8m1_tum(vm, vd, vs2, vs1, vl); +} + +vuint8m1_t test_vror_vx_u8m1_tum(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u8m1_tum(vm, vd, vs2, rs1, vl); +} + +vuint8m2_t test_vror_vv_u8m2_tum(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) { + return __riscv_vror_vv_u8m2_tum(vm, vd, vs2, vs1, vl); +} + +vuint8m2_t test_vror_vx_u8m2_tum(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u8m2_tum(vm, vd, vs2, rs1, vl); +} + +vuint8m4_t test_vror_vv_u8m4_tum(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) { + return __riscv_vror_vv_u8m4_tum(vm, vd, vs2, vs1, vl); +} + +vuint8m4_t test_vror_vx_u8m4_tum(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u8m4_tum(vm, vd, vs2, rs1, vl); +} + +vuint8m8_t test_vror_vv_u8m8_tum(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) { + return __riscv_vror_vv_u8m8_tum(vm, vd, vs2, vs1, vl); +} + +vuint8m8_t test_vror_vx_u8m8_tum(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u8m8_tum(vm, vd, vs2, rs1, vl); +} + +vuint16mf4_t test_vror_vv_u16mf4_tum(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { + return __riscv_vror_vv_u16mf4_tum(vm, vd, vs2, vs1, vl); +} + +vuint16mf4_t test_vror_vx_u16mf4_tum(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u16mf4_tum(vm, vd, vs2, rs1, vl); +} + +vuint16mf2_t test_vror_vv_u16mf2_tum(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { + return __riscv_vror_vv_u16mf2_tum(vm, vd, vs2, vs1, vl); +} + +vuint16mf2_t test_vror_vx_u16mf2_tum(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, size_t rs1, size_t vl) { + return 
__riscv_vror_vx_u16mf2_tum(vm, vd, vs2, rs1, vl); +} + +vuint16m1_t test_vror_vv_u16m1_tum(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vror_vv_u16m1_tum(vm, vd, vs2, vs1, vl); +} + +vuint16m1_t test_vror_vx_u16m1_tum(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u16m1_tum(vm, vd, vs2, rs1, vl); +} + +vuint16m2_t test_vror_vv_u16m2_tum(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { + return __riscv_vror_vv_u16m2_tum(vm, vd, vs2, vs1, vl); +} + +vuint16m2_t test_vror_vx_u16m2_tum(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u16m2_tum(vm, vd, vs2, rs1, vl); +} + +vuint16m4_t test_vror_vv_u16m4_tum(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { + return __riscv_vror_vv_u16m4_tum(vm, vd, vs2, vs1, vl); +} + +vuint16m4_t test_vror_vx_u16m4_tum(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u16m4_tum(vm, vd, vs2, rs1, vl); +} + +vuint16m8_t test_vror_vv_u16m8_tum(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) { + return __riscv_vror_vv_u16m8_tum(vm, vd, vs2, vs1, vl); +} + +vuint16m8_t test_vror_vx_u16m8_tum(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u16m8_tum(vm, vd, vs2, rs1, vl); +} + +vuint32mf2_t test_vror_vv_u32mf2_tum(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { + return __riscv_vror_vv_u32mf2_tum(vm, vd, vs2, vs1, vl); +} + +vuint32mf2_t test_vror_vx_u32mf2_tum(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u32mf2_tum(vm, vd, vs2, rs1, vl); +} + +vuint32m1_t test_vror_vv_u32m1_tum(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vror_vv_u32m1_tum(vm, vd, vs2, vs1, vl); +} + +vuint32m1_t test_vror_vx_u32m1_tum(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u32m1_tum(vm, vd, vs2, rs1, vl); +} + +vuint32m2_t test_vror_vv_u32m2_tum(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { + return __riscv_vror_vv_u32m2_tum(vm, vd, vs2, vs1, vl); +} + +vuint32m2_t test_vror_vx_u32m2_tum(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u32m2_tum(vm, vd, vs2, rs1, vl); +} + +vuint32m4_t test_vror_vv_u32m4_tum(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { + return __riscv_vror_vv_u32m4_tum(vm, vd, vs2, vs1, vl); +} + +vuint32m4_t test_vror_vx_u32m4_tum(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u32m4_tum(vm, vd, vs2, rs1, vl); +} + +vuint32m8_t test_vror_vv_u32m8_tum(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) { + return __riscv_vror_vv_u32m8_tum(vm, vd, vs2, vs1, vl); +} + +vuint32m8_t test_vror_vx_u32m8_tum(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u32m8_tum(vm, vd, vs2, rs1, vl); +} + +vuint64m1_t test_vror_vv_u64m1_tum(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vror_vv_u64m1_tum(vm, vd, vs2, vs1, vl); +} + +vuint64m1_t test_vror_vx_u64m1_tum(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u64m1_tum(vm, vd, vs2, rs1, vl); +} + +vuint64m2_t 
test_vror_vv_u64m2_tum(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { + return __riscv_vror_vv_u64m2_tum(vm, vd, vs2, vs1, vl); +} + +vuint64m2_t test_vror_vx_u64m2_tum(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u64m2_tum(vm, vd, vs2, rs1, vl); +} + +vuint64m4_t test_vror_vv_u64m4_tum(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { + return __riscv_vror_vv_u64m4_tum(vm, vd, vs2, vs1, vl); +} + +vuint64m4_t test_vror_vx_u64m4_tum(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u64m4_tum(vm, vd, vs2, rs1, vl); +} + +vuint64m8_t test_vror_vv_u64m8_tum(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { + return __riscv_vror_vv_u64m8_tum(vm, vd, vs2, vs1, vl); +} + +vuint64m8_t test_vror_vx_u64m8_tum(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u64m8_tum(vm, vd, vs2, rs1, vl); +} + +vuint8mf8_t test_vror_vv_u8mf8_tumu(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { + return __riscv_vror_vv_u8mf8_tumu(vm, vd, vs2, vs1, vl); +} + +vuint8mf8_t test_vror_vx_u8mf8_tumu(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u8mf8_tumu(vm, vd, vs2, rs1, vl); +} + +vuint8mf4_t test_vror_vv_u8mf4_tumu(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { + return __riscv_vror_vv_u8mf4_tumu(vm, vd, vs2, vs1, vl); +} + +vuint8mf4_t test_vror_vx_u8mf4_tumu(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u8mf4_tumu(vm, vd, vs2, rs1, vl); +} + +vuint8mf2_t test_vror_vv_u8mf2_tumu(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { + return __riscv_vror_vv_u8mf2_tumu(vm, vd, vs2, vs1, vl); +} + +vuint8mf2_t test_vror_vx_u8mf2_tumu(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u8mf2_tumu(vm, vd, vs2, rs1, vl); +} + +vuint8m1_t test_vror_vv_u8m1_tumu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vror_vv_u8m1_tumu(vm, vd, vs2, vs1, vl); +} + +vuint8m1_t test_vror_vx_u8m1_tumu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u8m1_tumu(vm, vd, vs2, rs1, vl); +} + +vuint8m2_t test_vror_vv_u8m2_tumu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) { + return __riscv_vror_vv_u8m2_tumu(vm, vd, vs2, vs1, vl); +} + +vuint8m2_t test_vror_vx_u8m2_tumu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u8m2_tumu(vm, vd, vs2, rs1, vl); +} + +vuint8m4_t test_vror_vv_u8m4_tumu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) { + return __riscv_vror_vv_u8m4_tumu(vm, vd, vs2, vs1, vl); +} + +vuint8m4_t test_vror_vx_u8m4_tumu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u8m4_tumu(vm, vd, vs2, rs1, vl); +} + +vuint8m8_t test_vror_vv_u8m8_tumu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) { + return __riscv_vror_vv_u8m8_tumu(vm, vd, vs2, vs1, vl); +} + +vuint8m8_t test_vror_vx_u8m8_tumu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u8m8_tumu(vm, vd, vs2, rs1, vl); +} + +vuint16mf4_t test_vror_vv_u16mf4_tumu(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { + 
return __riscv_vror_vv_u16mf4_tumu(vm, vd, vs2, vs1, vl); +} + +vuint16mf4_t test_vror_vx_u16mf4_tumu(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u16mf4_tumu(vm, vd, vs2, rs1, vl); +} + +vuint16mf2_t test_vror_vv_u16mf2_tumu(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { + return __riscv_vror_vv_u16mf2_tumu(vm, vd, vs2, vs1, vl); +} + +vuint16mf2_t test_vror_vx_u16mf2_tumu(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u16mf2_tumu(vm, vd, vs2, rs1, vl); +} + +vuint16m1_t test_vror_vv_u16m1_tumu(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vror_vv_u16m1_tumu(vm, vd, vs2, vs1, vl); +} + +vuint16m1_t test_vror_vx_u16m1_tumu(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u16m1_tumu(vm, vd, vs2, rs1, vl); +} + +vuint16m2_t test_vror_vv_u16m2_tumu(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { + return __riscv_vror_vv_u16m2_tumu(vm, vd, vs2, vs1, vl); +} + +vuint16m2_t test_vror_vx_u16m2_tumu(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u16m2_tumu(vm, vd, vs2, rs1, vl); +} + +vuint16m4_t test_vror_vv_u16m4_tumu(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { + return __riscv_vror_vv_u16m4_tumu(vm, vd, vs2, vs1, vl); +} + +vuint16m4_t test_vror_vx_u16m4_tumu(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u16m4_tumu(vm, vd, vs2, rs1, vl); +} + +vuint16m8_t test_vror_vv_u16m8_tumu(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) { + return __riscv_vror_vv_u16m8_tumu(vm, vd, vs2, vs1, vl); +} + +vuint16m8_t test_vror_vx_u16m8_tumu(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u16m8_tumu(vm, vd, vs2, rs1, vl); +} + +vuint32mf2_t test_vror_vv_u32mf2_tumu(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { + return __riscv_vror_vv_u32mf2_tumu(vm, vd, vs2, vs1, vl); +} + +vuint32mf2_t test_vror_vx_u32mf2_tumu(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u32mf2_tumu(vm, vd, vs2, rs1, vl); +} + +vuint32m1_t test_vror_vv_u32m1_tumu(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vror_vv_u32m1_tumu(vm, vd, vs2, vs1, vl); +} + +vuint32m1_t test_vror_vx_u32m1_tumu(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u32m1_tumu(vm, vd, vs2, rs1, vl); +} + +vuint32m2_t test_vror_vv_u32m2_tumu(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { + return __riscv_vror_vv_u32m2_tumu(vm, vd, vs2, vs1, vl); +} + +vuint32m2_t test_vror_vx_u32m2_tumu(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u32m2_tumu(vm, vd, vs2, rs1, vl); +} + +vuint32m4_t test_vror_vv_u32m4_tumu(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { + return __riscv_vror_vv_u32m4_tumu(vm, vd, vs2, vs1, vl); +} + +vuint32m4_t test_vror_vx_u32m4_tumu(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u32m4_tumu(vm, vd, vs2, rs1, vl); +} + +vuint32m8_t test_vror_vv_u32m8_tumu(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) { + return 
__riscv_vror_vv_u32m8_tumu(vm, vd, vs2, vs1, vl); +} + +vuint32m8_t test_vror_vx_u32m8_tumu(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u32m8_tumu(vm, vd, vs2, rs1, vl); +} + +vuint64m1_t test_vror_vv_u64m1_tumu(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vror_vv_u64m1_tumu(vm, vd, vs2, vs1, vl); +} + +vuint64m1_t test_vror_vx_u64m1_tumu(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u64m1_tumu(vm, vd, vs2, rs1, vl); +} + +vuint64m2_t test_vror_vv_u64m2_tumu(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { + return __riscv_vror_vv_u64m2_tumu(vm, vd, vs2, vs1, vl); +} + +vuint64m2_t test_vror_vx_u64m2_tumu(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u64m2_tumu(vm, vd, vs2, rs1, vl); +} + +vuint64m4_t test_vror_vv_u64m4_tumu(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { + return __riscv_vror_vv_u64m4_tumu(vm, vd, vs2, vs1, vl); +} + +vuint64m4_t test_vror_vx_u64m4_tumu(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u64m4_tumu(vm, vd, vs2, rs1, vl); +} + +vuint64m8_t test_vror_vv_u64m8_tumu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { + return __riscv_vror_vv_u64m8_tumu(vm, vd, vs2, vs1, vl); +} + +vuint64m8_t test_vror_vx_u64m8_tumu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u64m8_tumu(vm, vd, vs2, rs1, vl); +} + +vuint8mf8_t test_vror_vv_u8mf8_mu(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { + return __riscv_vror_vv_u8mf8_mu(vm, vd, vs2, vs1, vl); +} + +vuint8mf8_t test_vror_vx_u8mf8_mu(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u8mf8_mu(vm, vd, vs2, rs1, vl); +} + +vuint8mf4_t test_vror_vv_u8mf4_mu(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { + return __riscv_vror_vv_u8mf4_mu(vm, vd, vs2, vs1, vl); +} + +vuint8mf4_t test_vror_vx_u8mf4_mu(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u8mf4_mu(vm, vd, vs2, rs1, vl); +} + +vuint8mf2_t test_vror_vv_u8mf2_mu(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { + return __riscv_vror_vv_u8mf2_mu(vm, vd, vs2, vs1, vl); +} + +vuint8mf2_t test_vror_vx_u8mf2_mu(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u8mf2_mu(vm, vd, vs2, rs1, vl); +} + +vuint8m1_t test_vror_vv_u8m1_mu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vror_vv_u8m1_mu(vm, vd, vs2, vs1, vl); +} + +vuint8m1_t test_vror_vx_u8m1_mu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u8m1_mu(vm, vd, vs2, rs1, vl); +} + +vuint8m2_t test_vror_vv_u8m2_mu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) { + return __riscv_vror_vv_u8m2_mu(vm, vd, vs2, vs1, vl); +} + +vuint8m2_t test_vror_vx_u8m2_mu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u8m2_mu(vm, vd, vs2, rs1, vl); +} + +vuint8m4_t test_vror_vv_u8m4_mu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) { + return __riscv_vror_vv_u8m4_mu(vm, vd, vs2, vs1, vl); +} + +vuint8m4_t test_vror_vx_u8m4_mu(vbool2_t vm, vuint8m4_t vd, 
vuint8m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u8m4_mu(vm, vd, vs2, rs1, vl); +} + +vuint8m8_t test_vror_vv_u8m8_mu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) { + return __riscv_vror_vv_u8m8_mu(vm, vd, vs2, vs1, vl); +} + +vuint8m8_t test_vror_vx_u8m8_mu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u8m8_mu(vm, vd, vs2, rs1, vl); +} + +vuint16mf4_t test_vror_vv_u16mf4_mu(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { + return __riscv_vror_vv_u16mf4_mu(vm, vd, vs2, vs1, vl); +} + +vuint16mf4_t test_vror_vx_u16mf4_mu(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u16mf4_mu(vm, vd, vs2, rs1, vl); +} + +vuint16mf2_t test_vror_vv_u16mf2_mu(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { + return __riscv_vror_vv_u16mf2_mu(vm, vd, vs2, vs1, vl); +} + +vuint16mf2_t test_vror_vx_u16mf2_mu(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u16mf2_mu(vm, vd, vs2, rs1, vl); +} + +vuint16m1_t test_vror_vv_u16m1_mu(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vror_vv_u16m1_mu(vm, vd, vs2, vs1, vl); +} + +vuint16m1_t test_vror_vx_u16m1_mu(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u16m1_mu(vm, vd, vs2, rs1, vl); +} + +vuint16m2_t test_vror_vv_u16m2_mu(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { + return __riscv_vror_vv_u16m2_mu(vm, vd, vs2, vs1, vl); +} + +vuint16m2_t test_vror_vx_u16m2_mu(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u16m2_mu(vm, vd, vs2, rs1, vl); +} + +vuint16m4_t test_vror_vv_u16m4_mu(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { + return __riscv_vror_vv_u16m4_mu(vm, vd, vs2, vs1, vl); +} + +vuint16m4_t test_vror_vx_u16m4_mu(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u16m4_mu(vm, vd, vs2, rs1, vl); +} + +vuint16m8_t test_vror_vv_u16m8_mu(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) { + return __riscv_vror_vv_u16m8_mu(vm, vd, vs2, vs1, vl); +} + +vuint16m8_t test_vror_vx_u16m8_mu(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u16m8_mu(vm, vd, vs2, rs1, vl); +} + +vuint32mf2_t test_vror_vv_u32mf2_mu(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { + return __riscv_vror_vv_u32mf2_mu(vm, vd, vs2, vs1, vl); +} + +vuint32mf2_t test_vror_vx_u32mf2_mu(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u32mf2_mu(vm, vd, vs2, rs1, vl); +} + +vuint32m1_t test_vror_vv_u32m1_mu(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vror_vv_u32m1_mu(vm, vd, vs2, vs1, vl); +} + +vuint32m1_t test_vror_vx_u32m1_mu(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u32m1_mu(vm, vd, vs2, rs1, vl); +} + +vuint32m2_t test_vror_vv_u32m2_mu(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { + return __riscv_vror_vv_u32m2_mu(vm, vd, vs2, vs1, vl); +} + +vuint32m2_t test_vror_vx_u32m2_mu(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u32m2_mu(vm, vd, vs2, rs1, vl); +} + 
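+// In these _mu (mask-undisturbed) tests, vm is the element mask: masked-off
+// elements of vd keep their previous values, while active elements receive the
+// rotate-right of vs2 by the vector (vs1) or scalar (rs1) bit count.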
+vuint32m4_t test_vror_vv_u32m4_mu(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { + return __riscv_vror_vv_u32m4_mu(vm, vd, vs2, vs1, vl); +} + +vuint32m4_t test_vror_vx_u32m4_mu(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u32m4_mu(vm, vd, vs2, rs1, vl); +} + +vuint32m8_t test_vror_vv_u32m8_mu(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) { + return __riscv_vror_vv_u32m8_mu(vm, vd, vs2, vs1, vl); +} + +vuint32m8_t test_vror_vx_u32m8_mu(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u32m8_mu(vm, vd, vs2, rs1, vl); +} + +vuint64m1_t test_vror_vv_u64m1_mu(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vror_vv_u64m1_mu(vm, vd, vs2, vs1, vl); +} + +vuint64m1_t test_vror_vx_u64m1_mu(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u64m1_mu(vm, vd, vs2, rs1, vl); +} + +vuint64m2_t test_vror_vv_u64m2_mu(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { + return __riscv_vror_vv_u64m2_mu(vm, vd, vs2, vs1, vl); +} + +vuint64m2_t test_vror_vx_u64m2_mu(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u64m2_mu(vm, vd, vs2, rs1, vl); +} + +vuint64m4_t test_vror_vv_u64m4_mu(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { + return __riscv_vror_vv_u64m4_mu(vm, vd, vs2, vs1, vl); +} + +vuint64m4_t test_vror_vx_u64m4_mu(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u64m4_mu(vm, vd, vs2, rs1, vl); +} + +vuint64m8_t test_vror_vv_u64m8_mu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { + return __riscv_vror_vv_u64m8_mu(vm, vd, vs2, vs1, vl); +} + +vuint64m8_t test_vror_vx_u64m8_mu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u64m8_mu(vm, vd, vs2, rs1, vl); +} diff --git a/auto-generated/vector-crypto/policy_funcs/api-testing/vsha2ch.c b/auto-generated/vector-crypto/policy_funcs/api-testing/vsha2ch.c new file mode 100644 index 000000000..1d9b85bc0 --- /dev/null +++ b/auto-generated/vector-crypto/policy_funcs/api-testing/vsha2ch.c @@ -0,0 +1,38 @@ +#include +#include + +vuint32mf2_t test_vsha2ch_vv_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { + return __riscv_vsha2ch_vv_u32mf2_tu(vd, vs2, vs1, vl); +} + +vuint32m1_t test_vsha2ch_vv_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vsha2ch_vv_u32m1_tu(vd, vs2, vs1, vl); +} + +vuint32m2_t test_vsha2ch_vv_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { + return __riscv_vsha2ch_vv_u32m2_tu(vd, vs2, vs1, vl); +} + +vuint32m4_t test_vsha2ch_vv_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { + return __riscv_vsha2ch_vv_u32m4_tu(vd, vs2, vs1, vl); +} + +vuint32m8_t test_vsha2ch_vv_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) { + return __riscv_vsha2ch_vv_u32m8_tu(vd, vs2, vs1, vl); +} + +vuint64m1_t test_vsha2ch_vv_u64m1_tu(vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vsha2ch_vv_u64m1_tu(vd, vs2, vs1, vl); +} + +vuint64m2_t test_vsha2ch_vv_u64m2_tu(vuint64m2_t vd, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { + return __riscv_vsha2ch_vv_u64m2_tu(vd, vs2, vs1, vl); +} + +vuint64m4_t test_vsha2ch_vv_u64m4_tu(vuint64m4_t 
vd, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { + return __riscv_vsha2ch_vv_u64m4_tu(vd, vs2, vs1, vl); +} + +vuint64m8_t test_vsha2ch_vv_u64m8_tu(vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { + return __riscv_vsha2ch_vv_u64m8_tu(vd, vs2, vs1, vl); +} diff --git a/auto-generated/vector-crypto/policy_funcs/api-testing/vsha2cl.c b/auto-generated/vector-crypto/policy_funcs/api-testing/vsha2cl.c new file mode 100644 index 000000000..468a4d938 --- /dev/null +++ b/auto-generated/vector-crypto/policy_funcs/api-testing/vsha2cl.c @@ -0,0 +1,38 @@ +#include +#include + +vuint32mf2_t test_vsha2cl_vv_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { + return __riscv_vsha2cl_vv_u32mf2_tu(vd, vs2, vs1, vl); +} + +vuint32m1_t test_vsha2cl_vv_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vsha2cl_vv_u32m1_tu(vd, vs2, vs1, vl); +} + +vuint32m2_t test_vsha2cl_vv_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { + return __riscv_vsha2cl_vv_u32m2_tu(vd, vs2, vs1, vl); +} + +vuint32m4_t test_vsha2cl_vv_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { + return __riscv_vsha2cl_vv_u32m4_tu(vd, vs2, vs1, vl); +} + +vuint32m8_t test_vsha2cl_vv_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) { + return __riscv_vsha2cl_vv_u32m8_tu(vd, vs2, vs1, vl); +} + +vuint64m1_t test_vsha2cl_vv_u64m1_tu(vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vsha2cl_vv_u64m1_tu(vd, vs2, vs1, vl); +} + +vuint64m2_t test_vsha2cl_vv_u64m2_tu(vuint64m2_t vd, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { + return __riscv_vsha2cl_vv_u64m2_tu(vd, vs2, vs1, vl); +} + +vuint64m4_t test_vsha2cl_vv_u64m4_tu(vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { + return __riscv_vsha2cl_vv_u64m4_tu(vd, vs2, vs1, vl); +} + +vuint64m8_t test_vsha2cl_vv_u64m8_tu(vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { + return __riscv_vsha2cl_vv_u64m8_tu(vd, vs2, vs1, vl); +} diff --git a/auto-generated/vector-crypto/policy_funcs/api-testing/vsha2ms.c b/auto-generated/vector-crypto/policy_funcs/api-testing/vsha2ms.c new file mode 100644 index 000000000..9ee82d425 --- /dev/null +++ b/auto-generated/vector-crypto/policy_funcs/api-testing/vsha2ms.c @@ -0,0 +1,38 @@ +#include +#include + +vuint32mf2_t test_vsha2ms_vv_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { + return __riscv_vsha2ms_vv_u32mf2_tu(vd, vs2, vs1, vl); +} + +vuint32m1_t test_vsha2ms_vv_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vsha2ms_vv_u32m1_tu(vd, vs2, vs1, vl); +} + +vuint32m2_t test_vsha2ms_vv_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { + return __riscv_vsha2ms_vv_u32m2_tu(vd, vs2, vs1, vl); +} + +vuint32m4_t test_vsha2ms_vv_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { + return __riscv_vsha2ms_vv_u32m4_tu(vd, vs2, vs1, vl); +} + +vuint32m8_t test_vsha2ms_vv_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) { + return __riscv_vsha2ms_vv_u32m8_tu(vd, vs2, vs1, vl); +} + +vuint64m1_t test_vsha2ms_vv_u64m1_tu(vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vsha2ms_vv_u64m1_tu(vd, vs2, vs1, vl); +} + +vuint64m2_t test_vsha2ms_vv_u64m2_tu(vuint64m2_t vd, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { + return __riscv_vsha2ms_vv_u64m2_tu(vd, vs2, vs1, vl); +} + +vuint64m4_t test_vsha2ms_vv_u64m4_tu(vuint64m4_t 
vd, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { + return __riscv_vsha2ms_vv_u64m4_tu(vd, vs2, vs1, vl); +} + +vuint64m8_t test_vsha2ms_vv_u64m8_tu(vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { + return __riscv_vsha2ms_vv_u64m8_tu(vd, vs2, vs1, vl); +} diff --git a/auto-generated/vector-crypto/policy_funcs/api-testing/vsm3c.c b/auto-generated/vector-crypto/policy_funcs/api-testing/vsm3c.c new file mode 100644 index 000000000..f420557dc --- /dev/null +++ b/auto-generated/vector-crypto/policy_funcs/api-testing/vsm3c.c @@ -0,0 +1,22 @@ +#include +#include + +vuint32mf2_t test_vsm3c_vi_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) { + return __riscv_vsm3c_vi_u32mf2_tu(vd, vs2, 0, vl); +} + +vuint32m1_t test_vsm3c_vi_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vsm3c_vi_u32m1_tu(vd, vs2, 0, vl); +} + +vuint32m2_t test_vsm3c_vi_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vsm3c_vi_u32m2_tu(vd, vs2, 0, vl); +} + +vuint32m4_t test_vsm3c_vi_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vsm3c_vi_u32m4_tu(vd, vs2, 0, vl); +} + +vuint32m8_t test_vsm3c_vi_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { + return __riscv_vsm3c_vi_u32m8_tu(vd, vs2, 0, vl); +} diff --git a/auto-generated/vector-crypto/policy_funcs/api-testing/vsm3me.c b/auto-generated/vector-crypto/policy_funcs/api-testing/vsm3me.c new file mode 100644 index 000000000..9b635b0d8 --- /dev/null +++ b/auto-generated/vector-crypto/policy_funcs/api-testing/vsm3me.c @@ -0,0 +1,22 @@ +#include +#include + +vuint32mf2_t test_vsm3me_vv_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { + return __riscv_vsm3me_vv_u32mf2_tu(vd, vs2, vs1, vl); +} + +vuint32m1_t test_vsm3me_vv_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vsm3me_vv_u32m1_tu(vd, vs2, vs1, vl); +} + +vuint32m2_t test_vsm3me_vv_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { + return __riscv_vsm3me_vv_u32m2_tu(vd, vs2, vs1, vl); +} + +vuint32m4_t test_vsm3me_vv_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { + return __riscv_vsm3me_vv_u32m4_tu(vd, vs2, vs1, vl); +} + +vuint32m8_t test_vsm3me_vv_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) { + return __riscv_vsm3me_vv_u32m8_tu(vd, vs2, vs1, vl); +} diff --git a/auto-generated/vector-crypto/policy_funcs/api-testing/vsm4k.c b/auto-generated/vector-crypto/policy_funcs/api-testing/vsm4k.c new file mode 100644 index 000000000..270812106 --- /dev/null +++ b/auto-generated/vector-crypto/policy_funcs/api-testing/vsm4k.c @@ -0,0 +1,22 @@ +#include +#include + +vuint32mf2_t test_vsm4k_vi_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) { + return __riscv_vsm4k_vi_u32mf2_tu(vd, vs2, 0, vl); +} + +vuint32m1_t test_vsm4k_vi_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vsm4k_vi_u32m1_tu(vd, vs2, 0, vl); +} + +vuint32m2_t test_vsm4k_vi_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vsm4k_vi_u32m2_tu(vd, vs2, 0, vl); +} + +vuint32m4_t test_vsm4k_vi_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vsm4k_vi_u32m4_tu(vd, vs2, 0, vl); +} + +vuint32m8_t test_vsm4k_vi_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { + return __riscv_vsm4k_vi_u32m8_tu(vd, vs2, 0, vl); +} diff --git a/auto-generated/vector-crypto/policy_funcs/api-testing/vsm4r.c b/auto-generated/vector-crypto/policy_funcs/api-testing/vsm4r.c new file mode 
100644 index 000000000..4c95663f3 --- /dev/null +++ b/auto-generated/vector-crypto/policy_funcs/api-testing/vsm4r.c @@ -0,0 +1,78 @@ +#include +#include + +vuint32mf2_t test_vsm4r_vv_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) { + return __riscv_vsm4r_vv_u32mf2_tu(vd, vs2, vl); +} + +vuint32mf2_t test_vsm4r_vs_u32mf2_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) { + return __riscv_vsm4r_vs_u32mf2_u32mf2_tu(vd, vs2, vl); +} + +vuint32m1_t test_vsm4r_vs_u32mf2_u32m1_tu(vuint32m1_t vd, vuint32mf2_t vs2, size_t vl) { + return __riscv_vsm4r_vs_u32mf2_u32m1_tu(vd, vs2, vl); +} + +vuint32m2_t test_vsm4r_vs_u32mf2_u32m2_tu(vuint32m2_t vd, vuint32mf2_t vs2, size_t vl) { + return __riscv_vsm4r_vs_u32mf2_u32m2_tu(vd, vs2, vl); +} + +vuint32m4_t test_vsm4r_vs_u32mf2_u32m4_tu(vuint32m4_t vd, vuint32mf2_t vs2, size_t vl) { + return __riscv_vsm4r_vs_u32mf2_u32m4_tu(vd, vs2, vl); +} + +vuint32m8_t test_vsm4r_vs_u32mf2_u32m8_tu(vuint32m8_t vd, vuint32mf2_t vs2, size_t vl) { + return __riscv_vsm4r_vs_u32mf2_u32m8_tu(vd, vs2, vl); +} + +vuint32m1_t test_vsm4r_vv_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vsm4r_vv_u32m1_tu(vd, vs2, vl); +} + +vuint32m1_t test_vsm4r_vs_u32m1_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vsm4r_vs_u32m1_u32m1_tu(vd, vs2, vl); +} + +vuint32m2_t test_vsm4r_vs_u32m1_u32m2_tu(vuint32m2_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vsm4r_vs_u32m1_u32m2_tu(vd, vs2, vl); +} + +vuint32m4_t test_vsm4r_vs_u32m1_u32m4_tu(vuint32m4_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vsm4r_vs_u32m1_u32m4_tu(vd, vs2, vl); +} + +vuint32m8_t test_vsm4r_vs_u32m1_u32m8_tu(vuint32m8_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vsm4r_vs_u32m1_u32m8_tu(vd, vs2, vl); +} + +vuint32m2_t test_vsm4r_vv_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vsm4r_vv_u32m2_tu(vd, vs2, vl); +} + +vuint32m2_t test_vsm4r_vs_u32m2_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vsm4r_vs_u32m2_u32m2_tu(vd, vs2, vl); +} + +vuint32m4_t test_vsm4r_vs_u32m2_u32m4_tu(vuint32m4_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vsm4r_vs_u32m2_u32m4_tu(vd, vs2, vl); +} + +vuint32m8_t test_vsm4r_vs_u32m2_u32m8_tu(vuint32m8_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vsm4r_vs_u32m2_u32m8_tu(vd, vs2, vl); +} + +vuint32m4_t test_vsm4r_vv_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vsm4r_vv_u32m4_tu(vd, vs2, vl); +} + +vuint32m4_t test_vsm4r_vs_u32m4_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vsm4r_vs_u32m4_u32m4_tu(vd, vs2, vl); +} + +vuint32m8_t test_vsm4r_vs_u32m4_u32m8_tu(vuint32m8_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vsm4r_vs_u32m4_u32m8_tu(vd, vs2, vl); +} + +vuint32m8_t test_vsm4r_vv_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { + return __riscv_vsm4r_vv_u32m8_tu(vd, vs2, vl); +} diff --git a/auto-generated/vector-crypto/policy_funcs/api-testing/vwsll.c b/auto-generated/vector-crypto/policy_funcs/api-testing/vwsll.c new file mode 100644 index 000000000..56b35568a --- /dev/null +++ b/auto-generated/vector-crypto/policy_funcs/api-testing/vwsll.c @@ -0,0 +1,482 @@ +#include +#include + +vuint16mf4_t test_vwsll_vv_u16mf4_tu(vuint16mf4_t vd, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { + return __riscv_vwsll_vv_u16mf4_tu(vd, vs2, vs1, vl); +} + +vuint16mf4_t test_vwsll_vx_u16mf4_tu(vuint16mf4_t vd, vuint8mf8_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_vx_u16mf4_tu(vd, vs2, rs1, vl); +} 
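+// vwsll is a widening shift-left-logical: vd holds elements twice the width of
+// vs2, and the _tu suffix leaves tail elements of vd past vl undisturbed.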
+ +vuint16mf2_t test_vwsll_vv_u16mf2_tu(vuint16mf2_t vd, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { + return __riscv_vwsll_vv_u16mf2_tu(vd, vs2, vs1, vl); +} + +vuint16mf2_t test_vwsll_vx_u16mf2_tu(vuint16mf2_t vd, vuint8mf4_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_vx_u16mf2_tu(vd, vs2, rs1, vl); +} + +vuint16m1_t test_vwsll_vv_u16m1_tu(vuint16m1_t vd, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { + return __riscv_vwsll_vv_u16m1_tu(vd, vs2, vs1, vl); +} + +vuint16m1_t test_vwsll_vx_u16m1_tu(vuint16m1_t vd, vuint8mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_vx_u16m1_tu(vd, vs2, rs1, vl); +} + +vuint16m2_t test_vwsll_vv_u16m2_tu(vuint16m2_t vd, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vwsll_vv_u16m2_tu(vd, vs2, vs1, vl); +} + +vuint16m2_t test_vwsll_vx_u16m2_tu(vuint16m2_t vd, vuint8m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_vx_u16m2_tu(vd, vs2, rs1, vl); +} + +vuint16m4_t test_vwsll_vv_u16m4_tu(vuint16m4_t vd, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) { + return __riscv_vwsll_vv_u16m4_tu(vd, vs2, vs1, vl); +} + +vuint16m4_t test_vwsll_vx_u16m4_tu(vuint16m4_t vd, vuint8m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_vx_u16m4_tu(vd, vs2, rs1, vl); +} + +vuint16m8_t test_vwsll_vv_u16m8_tu(vuint16m8_t vd, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) { + return __riscv_vwsll_vv_u16m8_tu(vd, vs2, vs1, vl); +} + +vuint16m8_t test_vwsll_vx_u16m8_tu(vuint16m8_t vd, vuint8m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_vx_u16m8_tu(vd, vs2, rs1, vl); +} + +vuint32mf2_t test_vwsll_vv_u32mf2_tu(vuint32mf2_t vd, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { + return __riscv_vwsll_vv_u32mf2_tu(vd, vs2, vs1, vl); +} + +vuint32mf2_t test_vwsll_vx_u32mf2_tu(vuint32mf2_t vd, vuint16mf4_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_vx_u32mf2_tu(vd, vs2, rs1, vl); +} + +vuint32m1_t test_vwsll_vv_u32m1_tu(vuint32m1_t vd, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { + return __riscv_vwsll_vv_u32m1_tu(vd, vs2, vs1, vl); +} + +vuint32m1_t test_vwsll_vx_u32m1_tu(vuint32m1_t vd, vuint16mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_vx_u32m1_tu(vd, vs2, rs1, vl); +} + +vuint32m2_t test_vwsll_vv_u32m2_tu(vuint32m2_t vd, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vwsll_vv_u32m2_tu(vd, vs2, vs1, vl); +} + +vuint32m2_t test_vwsll_vx_u32m2_tu(vuint32m2_t vd, vuint16m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_vx_u32m2_tu(vd, vs2, rs1, vl); +} + +vuint32m4_t test_vwsll_vv_u32m4_tu(vuint32m4_t vd, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { + return __riscv_vwsll_vv_u32m4_tu(vd, vs2, vs1, vl); +} + +vuint32m4_t test_vwsll_vx_u32m4_tu(vuint32m4_t vd, vuint16m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_vx_u32m4_tu(vd, vs2, rs1, vl); +} + +vuint32m8_t test_vwsll_vv_u32m8_tu(vuint32m8_t vd, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { + return __riscv_vwsll_vv_u32m8_tu(vd, vs2, vs1, vl); +} + +vuint32m8_t test_vwsll_vx_u32m8_tu(vuint32m8_t vd, vuint16m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_vx_u32m8_tu(vd, vs2, rs1, vl); +} + +vuint64m1_t test_vwsll_vv_u64m1_tu(vuint64m1_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { + return __riscv_vwsll_vv_u64m1_tu(vd, vs2, vs1, vl); +} + +vuint64m1_t test_vwsll_vx_u64m1_tu(vuint64m1_t vd, vuint32mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_vx_u64m1_tu(vd, vs2, rs1, vl); +} + +vuint64m2_t test_vwsll_vv_u64m2_tu(vuint64m2_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { + return 
__riscv_vwsll_vv_u64m2_tu(vd, vs2, vs1, vl); +} + +vuint64m2_t test_vwsll_vx_u64m2_tu(vuint64m2_t vd, vuint32m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_vx_u64m2_tu(vd, vs2, rs1, vl); +} + +vuint64m4_t test_vwsll_vv_u64m4_tu(vuint64m4_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { + return __riscv_vwsll_vv_u64m4_tu(vd, vs2, vs1, vl); +} + +vuint64m4_t test_vwsll_vx_u64m4_tu(vuint64m4_t vd, vuint32m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_vx_u64m4_tu(vd, vs2, rs1, vl); +} + +vuint64m8_t test_vwsll_vv_u64m8_tu(vuint64m8_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { + return __riscv_vwsll_vv_u64m8_tu(vd, vs2, vs1, vl); +} + +vuint64m8_t test_vwsll_vx_u64m8_tu(vuint64m8_t vd, vuint32m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_vx_u64m8_tu(vd, vs2, rs1, vl); +} + +vuint16mf4_t test_vwsll_vv_u16mf4_tum(vbool64_t vm, vuint16mf4_t vd, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { + return __riscv_vwsll_vv_u16mf4_tum(vm, vd, vs2, vs1, vl); +} + +vuint16mf4_t test_vwsll_vx_u16mf4_tum(vbool64_t vm, vuint16mf4_t vd, vuint8mf8_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_vx_u16mf4_tum(vm, vd, vs2, rs1, vl); +} + +vuint16mf2_t test_vwsll_vv_u16mf2_tum(vbool32_t vm, vuint16mf2_t vd, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { + return __riscv_vwsll_vv_u16mf2_tum(vm, vd, vs2, vs1, vl); +} + +vuint16mf2_t test_vwsll_vx_u16mf2_tum(vbool32_t vm, vuint16mf2_t vd, vuint8mf4_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_vx_u16mf2_tum(vm, vd, vs2, rs1, vl); +} + +vuint16m1_t test_vwsll_vv_u16m1_tum(vbool16_t vm, vuint16m1_t vd, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { + return __riscv_vwsll_vv_u16m1_tum(vm, vd, vs2, vs1, vl); +} + +vuint16m1_t test_vwsll_vx_u16m1_tum(vbool16_t vm, vuint16m1_t vd, vuint8mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_vx_u16m1_tum(vm, vd, vs2, rs1, vl); +} + +vuint16m2_t test_vwsll_vv_u16m2_tum(vbool8_t vm, vuint16m2_t vd, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vwsll_vv_u16m2_tum(vm, vd, vs2, vs1, vl); +} + +vuint16m2_t test_vwsll_vx_u16m2_tum(vbool8_t vm, vuint16m2_t vd, vuint8m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_vx_u16m2_tum(vm, vd, vs2, rs1, vl); +} + +vuint16m4_t test_vwsll_vv_u16m4_tum(vbool4_t vm, vuint16m4_t vd, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) { + return __riscv_vwsll_vv_u16m4_tum(vm, vd, vs2, vs1, vl); +} + +vuint16m4_t test_vwsll_vx_u16m4_tum(vbool4_t vm, vuint16m4_t vd, vuint8m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_vx_u16m4_tum(vm, vd, vs2, rs1, vl); +} + +vuint16m8_t test_vwsll_vv_u16m8_tum(vbool2_t vm, vuint16m8_t vd, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) { + return __riscv_vwsll_vv_u16m8_tum(vm, vd, vs2, vs1, vl); +} + +vuint16m8_t test_vwsll_vx_u16m8_tum(vbool2_t vm, vuint16m8_t vd, vuint8m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_vx_u16m8_tum(vm, vd, vs2, rs1, vl); +} + +vuint32mf2_t test_vwsll_vv_u32mf2_tum(vbool64_t vm, vuint32mf2_t vd, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { + return __riscv_vwsll_vv_u32mf2_tum(vm, vd, vs2, vs1, vl); +} + +vuint32mf2_t test_vwsll_vx_u32mf2_tum(vbool64_t vm, vuint32mf2_t vd, vuint16mf4_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_vx_u32mf2_tum(vm, vd, vs2, rs1, vl); +} + +vuint32m1_t test_vwsll_vv_u32m1_tum(vbool32_t vm, vuint32m1_t vd, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { + return __riscv_vwsll_vv_u32m1_tum(vm, vd, vs2, vs1, vl); +} + +vuint32m1_t test_vwsll_vx_u32m1_tum(vbool32_t vm, vuint32m1_t vd, vuint16mf2_t 
vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_vx_u32m1_tum(vm, vd, vs2, rs1, vl); +} + +vuint32m2_t test_vwsll_vv_u32m2_tum(vbool16_t vm, vuint32m2_t vd, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vwsll_vv_u32m2_tum(vm, vd, vs2, vs1, vl); +} + +vuint32m2_t test_vwsll_vx_u32m2_tum(vbool16_t vm, vuint32m2_t vd, vuint16m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_vx_u32m2_tum(vm, vd, vs2, rs1, vl); +} + +vuint32m4_t test_vwsll_vv_u32m4_tum(vbool8_t vm, vuint32m4_t vd, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { + return __riscv_vwsll_vv_u32m4_tum(vm, vd, vs2, vs1, vl); +} + +vuint32m4_t test_vwsll_vx_u32m4_tum(vbool8_t vm, vuint32m4_t vd, vuint16m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_vx_u32m4_tum(vm, vd, vs2, rs1, vl); +} + +vuint32m8_t test_vwsll_vv_u32m8_tum(vbool4_t vm, vuint32m8_t vd, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { + return __riscv_vwsll_vv_u32m8_tum(vm, vd, vs2, vs1, vl); +} + +vuint32m8_t test_vwsll_vx_u32m8_tum(vbool4_t vm, vuint32m8_t vd, vuint16m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_vx_u32m8_tum(vm, vd, vs2, rs1, vl); +} + +vuint64m1_t test_vwsll_vv_u64m1_tum(vbool64_t vm, vuint64m1_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { + return __riscv_vwsll_vv_u64m1_tum(vm, vd, vs2, vs1, vl); +} + +vuint64m1_t test_vwsll_vx_u64m1_tum(vbool64_t vm, vuint64m1_t vd, vuint32mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_vx_u64m1_tum(vm, vd, vs2, rs1, vl); +} + +vuint64m2_t test_vwsll_vv_u64m2_tum(vbool32_t vm, vuint64m2_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vwsll_vv_u64m2_tum(vm, vd, vs2, vs1, vl); +} + +vuint64m2_t test_vwsll_vx_u64m2_tum(vbool32_t vm, vuint64m2_t vd, vuint32m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_vx_u64m2_tum(vm, vd, vs2, rs1, vl); +} + +vuint64m4_t test_vwsll_vv_u64m4_tum(vbool16_t vm, vuint64m4_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { + return __riscv_vwsll_vv_u64m4_tum(vm, vd, vs2, vs1, vl); +} + +vuint64m4_t test_vwsll_vx_u64m4_tum(vbool16_t vm, vuint64m4_t vd, vuint32m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_vx_u64m4_tum(vm, vd, vs2, rs1, vl); +} + +vuint64m8_t test_vwsll_vv_u64m8_tum(vbool8_t vm, vuint64m8_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { + return __riscv_vwsll_vv_u64m8_tum(vm, vd, vs2, vs1, vl); +} + +vuint64m8_t test_vwsll_vx_u64m8_tum(vbool8_t vm, vuint64m8_t vd, vuint32m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_vx_u64m8_tum(vm, vd, vs2, rs1, vl); +} + +vuint16mf4_t test_vwsll_vv_u16mf4_tumu(vbool64_t vm, vuint16mf4_t vd, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { + return __riscv_vwsll_vv_u16mf4_tumu(vm, vd, vs2, vs1, vl); +} + +vuint16mf4_t test_vwsll_vx_u16mf4_tumu(vbool64_t vm, vuint16mf4_t vd, vuint8mf8_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_vx_u16mf4_tumu(vm, vd, vs2, rs1, vl); +} + +vuint16mf2_t test_vwsll_vv_u16mf2_tumu(vbool32_t vm, vuint16mf2_t vd, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { + return __riscv_vwsll_vv_u16mf2_tumu(vm, vd, vs2, vs1, vl); +} + +vuint16mf2_t test_vwsll_vx_u16mf2_tumu(vbool32_t vm, vuint16mf2_t vd, vuint8mf4_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_vx_u16mf2_tumu(vm, vd, vs2, rs1, vl); +} + +vuint16m1_t test_vwsll_vv_u16m1_tumu(vbool16_t vm, vuint16m1_t vd, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { + return __riscv_vwsll_vv_u16m1_tumu(vm, vd, vs2, vs1, vl); +} + +vuint16m1_t test_vwsll_vx_u16m1_tumu(vbool16_t vm, vuint16m1_t vd, vuint8mf2_t vs2, size_t rs1, 
size_t vl) { + return __riscv_vwsll_vx_u16m1_tumu(vm, vd, vs2, rs1, vl); +} + +vuint16m2_t test_vwsll_vv_u16m2_tumu(vbool8_t vm, vuint16m2_t vd, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vwsll_vv_u16m2_tumu(vm, vd, vs2, vs1, vl); +} + +vuint16m2_t test_vwsll_vx_u16m2_tumu(vbool8_t vm, vuint16m2_t vd, vuint8m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_vx_u16m2_tumu(vm, vd, vs2, rs1, vl); +} + +vuint16m4_t test_vwsll_vv_u16m4_tumu(vbool4_t vm, vuint16m4_t vd, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) { + return __riscv_vwsll_vv_u16m4_tumu(vm, vd, vs2, vs1, vl); +} + +vuint16m4_t test_vwsll_vx_u16m4_tumu(vbool4_t vm, vuint16m4_t vd, vuint8m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_vx_u16m4_tumu(vm, vd, vs2, rs1, vl); +} + +vuint16m8_t test_vwsll_vv_u16m8_tumu(vbool2_t vm, vuint16m8_t vd, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) { + return __riscv_vwsll_vv_u16m8_tumu(vm, vd, vs2, vs1, vl); +} + +vuint16m8_t test_vwsll_vx_u16m8_tumu(vbool2_t vm, vuint16m8_t vd, vuint8m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_vx_u16m8_tumu(vm, vd, vs2, rs1, vl); +} + +vuint32mf2_t test_vwsll_vv_u32mf2_tumu(vbool64_t vm, vuint32mf2_t vd, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { + return __riscv_vwsll_vv_u32mf2_tumu(vm, vd, vs2, vs1, vl); +} + +vuint32mf2_t test_vwsll_vx_u32mf2_tumu(vbool64_t vm, vuint32mf2_t vd, vuint16mf4_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_vx_u32mf2_tumu(vm, vd, vs2, rs1, vl); +} + +vuint32m1_t test_vwsll_vv_u32m1_tumu(vbool32_t vm, vuint32m1_t vd, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { + return __riscv_vwsll_vv_u32m1_tumu(vm, vd, vs2, vs1, vl); +} + +vuint32m1_t test_vwsll_vx_u32m1_tumu(vbool32_t vm, vuint32m1_t vd, vuint16mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_vx_u32m1_tumu(vm, vd, vs2, rs1, vl); +} + +vuint32m2_t test_vwsll_vv_u32m2_tumu(vbool16_t vm, vuint32m2_t vd, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vwsll_vv_u32m2_tumu(vm, vd, vs2, vs1, vl); +} + +vuint32m2_t test_vwsll_vx_u32m2_tumu(vbool16_t vm, vuint32m2_t vd, vuint16m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_vx_u32m2_tumu(vm, vd, vs2, rs1, vl); +} + +vuint32m4_t test_vwsll_vv_u32m4_tumu(vbool8_t vm, vuint32m4_t vd, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { + return __riscv_vwsll_vv_u32m4_tumu(vm, vd, vs2, vs1, vl); +} + +vuint32m4_t test_vwsll_vx_u32m4_tumu(vbool8_t vm, vuint32m4_t vd, vuint16m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_vx_u32m4_tumu(vm, vd, vs2, rs1, vl); +} + +vuint32m8_t test_vwsll_vv_u32m8_tumu(vbool4_t vm, vuint32m8_t vd, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { + return __riscv_vwsll_vv_u32m8_tumu(vm, vd, vs2, vs1, vl); +} + +vuint32m8_t test_vwsll_vx_u32m8_tumu(vbool4_t vm, vuint32m8_t vd, vuint16m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_vx_u32m8_tumu(vm, vd, vs2, rs1, vl); +} + +vuint64m1_t test_vwsll_vv_u64m1_tumu(vbool64_t vm, vuint64m1_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { + return __riscv_vwsll_vv_u64m1_tumu(vm, vd, vs2, vs1, vl); +} + +vuint64m1_t test_vwsll_vx_u64m1_tumu(vbool64_t vm, vuint64m1_t vd, vuint32mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_vx_u64m1_tumu(vm, vd, vs2, rs1, vl); +} + +vuint64m2_t test_vwsll_vv_u64m2_tumu(vbool32_t vm, vuint64m2_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vwsll_vv_u64m2_tumu(vm, vd, vs2, vs1, vl); +} + +vuint64m2_t test_vwsll_vx_u64m2_tumu(vbool32_t vm, vuint64m2_t vd, vuint32m1_t vs2, size_t rs1, size_t 
vl) { + return __riscv_vwsll_vx_u64m2_tumu(vm, vd, vs2, rs1, vl); +} + +vuint64m4_t test_vwsll_vv_u64m4_tumu(vbool16_t vm, vuint64m4_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { + return __riscv_vwsll_vv_u64m4_tumu(vm, vd, vs2, vs1, vl); +} + +vuint64m4_t test_vwsll_vx_u64m4_tumu(vbool16_t vm, vuint64m4_t vd, vuint32m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_vx_u64m4_tumu(vm, vd, vs2, rs1, vl); +} + +vuint64m8_t test_vwsll_vv_u64m8_tumu(vbool8_t vm, vuint64m8_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { + return __riscv_vwsll_vv_u64m8_tumu(vm, vd, vs2, vs1, vl); +} + +vuint64m8_t test_vwsll_vx_u64m8_tumu(vbool8_t vm, vuint64m8_t vd, vuint32m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_vx_u64m8_tumu(vm, vd, vs2, rs1, vl); +} + +vuint16mf4_t test_vwsll_vv_u16mf4_mu(vbool64_t vm, vuint16mf4_t vd, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { + return __riscv_vwsll_vv_u16mf4_mu(vm, vd, vs2, vs1, vl); +} + +vuint16mf4_t test_vwsll_vx_u16mf4_mu(vbool64_t vm, vuint16mf4_t vd, vuint8mf8_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_vx_u16mf4_mu(vm, vd, vs2, rs1, vl); +} + +vuint16mf2_t test_vwsll_vv_u16mf2_mu(vbool32_t vm, vuint16mf2_t vd, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { + return __riscv_vwsll_vv_u16mf2_mu(vm, vd, vs2, vs1, vl); +} + +vuint16mf2_t test_vwsll_vx_u16mf2_mu(vbool32_t vm, vuint16mf2_t vd, vuint8mf4_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_vx_u16mf2_mu(vm, vd, vs2, rs1, vl); +} + +vuint16m1_t test_vwsll_vv_u16m1_mu(vbool16_t vm, vuint16m1_t vd, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { + return __riscv_vwsll_vv_u16m1_mu(vm, vd, vs2, vs1, vl); +} + +vuint16m1_t test_vwsll_vx_u16m1_mu(vbool16_t vm, vuint16m1_t vd, vuint8mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_vx_u16m1_mu(vm, vd, vs2, rs1, vl); +} + +vuint16m2_t test_vwsll_vv_u16m2_mu(vbool8_t vm, vuint16m2_t vd, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vwsll_vv_u16m2_mu(vm, vd, vs2, vs1, vl); +} + +vuint16m2_t test_vwsll_vx_u16m2_mu(vbool8_t vm, vuint16m2_t vd, vuint8m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_vx_u16m2_mu(vm, vd, vs2, rs1, vl); +} + +vuint16m4_t test_vwsll_vv_u16m4_mu(vbool4_t vm, vuint16m4_t vd, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) { + return __riscv_vwsll_vv_u16m4_mu(vm, vd, vs2, vs1, vl); +} + +vuint16m4_t test_vwsll_vx_u16m4_mu(vbool4_t vm, vuint16m4_t vd, vuint8m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_vx_u16m4_mu(vm, vd, vs2, rs1, vl); +} + +vuint16m8_t test_vwsll_vv_u16m8_mu(vbool2_t vm, vuint16m8_t vd, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) { + return __riscv_vwsll_vv_u16m8_mu(vm, vd, vs2, vs1, vl); +} + +vuint16m8_t test_vwsll_vx_u16m8_mu(vbool2_t vm, vuint16m8_t vd, vuint8m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_vx_u16m8_mu(vm, vd, vs2, rs1, vl); +} + +vuint32mf2_t test_vwsll_vv_u32mf2_mu(vbool64_t vm, vuint32mf2_t vd, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { + return __riscv_vwsll_vv_u32mf2_mu(vm, vd, vs2, vs1, vl); +} + +vuint32mf2_t test_vwsll_vx_u32mf2_mu(vbool64_t vm, vuint32mf2_t vd, vuint16mf4_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_vx_u32mf2_mu(vm, vd, vs2, rs1, vl); +} + +vuint32m1_t test_vwsll_vv_u32m1_mu(vbool32_t vm, vuint32m1_t vd, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { + return __riscv_vwsll_vv_u32m1_mu(vm, vd, vs2, vs1, vl); +} + +vuint32m1_t test_vwsll_vx_u32m1_mu(vbool32_t vm, vuint32m1_t vd, vuint16mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_vx_u32m1_mu(vm, vd, vs2, 
rs1, vl); +} + +vuint32m2_t test_vwsll_vv_u32m2_mu(vbool16_t vm, vuint32m2_t vd, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vwsll_vv_u32m2_mu(vm, vd, vs2, vs1, vl); +} + +vuint32m2_t test_vwsll_vx_u32m2_mu(vbool16_t vm, vuint32m2_t vd, vuint16m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_vx_u32m2_mu(vm, vd, vs2, rs1, vl); +} + +vuint32m4_t test_vwsll_vv_u32m4_mu(vbool8_t vm, vuint32m4_t vd, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { + return __riscv_vwsll_vv_u32m4_mu(vm, vd, vs2, vs1, vl); +} + +vuint32m4_t test_vwsll_vx_u32m4_mu(vbool8_t vm, vuint32m4_t vd, vuint16m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_vx_u32m4_mu(vm, vd, vs2, rs1, vl); +} + +vuint32m8_t test_vwsll_vv_u32m8_mu(vbool4_t vm, vuint32m8_t vd, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { + return __riscv_vwsll_vv_u32m8_mu(vm, vd, vs2, vs1, vl); +} + +vuint32m8_t test_vwsll_vx_u32m8_mu(vbool4_t vm, vuint32m8_t vd, vuint16m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_vx_u32m8_mu(vm, vd, vs2, rs1, vl); +} + +vuint64m1_t test_vwsll_vv_u64m1_mu(vbool64_t vm, vuint64m1_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { + return __riscv_vwsll_vv_u64m1_mu(vm, vd, vs2, vs1, vl); +} + +vuint64m1_t test_vwsll_vx_u64m1_mu(vbool64_t vm, vuint64m1_t vd, vuint32mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_vx_u64m1_mu(vm, vd, vs2, rs1, vl); +} + +vuint64m2_t test_vwsll_vv_u64m2_mu(vbool32_t vm, vuint64m2_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vwsll_vv_u64m2_mu(vm, vd, vs2, vs1, vl); +} + +vuint64m2_t test_vwsll_vx_u64m2_mu(vbool32_t vm, vuint64m2_t vd, vuint32m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_vx_u64m2_mu(vm, vd, vs2, rs1, vl); +} + +vuint64m4_t test_vwsll_vv_u64m4_mu(vbool16_t vm, vuint64m4_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { + return __riscv_vwsll_vv_u64m4_mu(vm, vd, vs2, vs1, vl); +} + +vuint64m4_t test_vwsll_vx_u64m4_mu(vbool16_t vm, vuint64m4_t vd, vuint32m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_vx_u64m4_mu(vm, vd, vs2, rs1, vl); +} + +vuint64m8_t test_vwsll_vv_u64m8_mu(vbool8_t vm, vuint64m8_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { + return __riscv_vwsll_vv_u64m8_mu(vm, vd, vs2, vs1, vl); +} + +vuint64m8_t test_vwsll_vx_u64m8_mu(vbool8_t vm, vuint64m8_t vd, vuint32m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_vx_u64m8_mu(vm, vd, vs2, rs1, vl); +} diff --git a/auto-generated/vector-crypto/policy_funcs/intrinsic_funcs.md b/auto-generated/vector-crypto/policy_funcs/intrinsic_funcs.md new file mode 100644 index 000000000..444080442 --- /dev/null +++ b/auto-generated/vector-crypto/policy_funcs/intrinsic_funcs.md @@ -0,0 +1,1572 @@ + +=== Zvbb - Vector Bit-manipulation used in Cryptography + +[[policy-variant-]] +==== Vector Bit-manipulation used in Cryptography - Bitwise And-Not + +[,c] +---- +vuint8mf8_t __riscv_vandn_vv_u8mf8_tu (vuint8mf8_t vd, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl); +vuint8mf8_t __riscv_vandn_vx_u8mf8_tu (vuint8mf8_t vd, vuint8mf8_t vs2, uint8_t rs1, size_t vl); +vuint8mf4_t __riscv_vandn_vv_u8mf4_tu (vuint8mf4_t vd, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl); +vuint8mf4_t __riscv_vandn_vx_u8mf4_tu (vuint8mf4_t vd, vuint8mf4_t vs2, uint8_t rs1, size_t vl); +vuint8mf2_t __riscv_vandn_vv_u8mf2_tu (vuint8mf2_t vd, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl); +vuint8mf2_t __riscv_vandn_vx_u8mf2_tu (vuint8mf2_t vd, vuint8mf2_t vs2, uint8_t rs1, size_t vl); +vuint8m1_t __riscv_vandn_vv_u8m1_tu (vuint8m1_t vd, vuint8m1_t 
vs2, vuint8m1_t vs1, size_t vl); +vuint8m1_t __riscv_vandn_vx_u8m1_tu (vuint8m1_t vd, vuint8m1_t vs2, uint8_t rs1, size_t vl); +vuint8m2_t __riscv_vandn_vv_u8m2_tu (vuint8m2_t vd, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl); +vuint8m2_t __riscv_vandn_vx_u8m2_tu (vuint8m2_t vd, vuint8m2_t vs2, uint8_t rs1, size_t vl); +vuint8m4_t __riscv_vandn_vv_u8m4_tu (vuint8m4_t vd, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl); +vuint8m4_t __riscv_vandn_vx_u8m4_tu (vuint8m4_t vd, vuint8m4_t vs2, uint8_t rs1, size_t vl); +vuint8m8_t __riscv_vandn_vv_u8m8_tu (vuint8m8_t vd, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl); +vuint8m8_t __riscv_vandn_vx_u8m8_tu (vuint8m8_t vd, vuint8m8_t vs2, uint8_t rs1, size_t vl); +vuint16mf4_t __riscv_vandn_vv_u16mf4_tu (vuint16mf4_t vd, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl); +vuint16mf4_t __riscv_vandn_vx_u16mf4_tu (vuint16mf4_t vd, vuint16mf4_t vs2, uint16_t rs1, size_t vl); +vuint16mf2_t __riscv_vandn_vv_u16mf2_tu (vuint16mf2_t vd, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl); +vuint16mf2_t __riscv_vandn_vx_u16mf2_tu (vuint16mf2_t vd, vuint16mf2_t vs2, uint16_t rs1, size_t vl); +vuint16m1_t __riscv_vandn_vv_u16m1_tu (vuint16m1_t vd, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl); +vuint16m1_t __riscv_vandn_vx_u16m1_tu (vuint16m1_t vd, vuint16m1_t vs2, uint16_t rs1, size_t vl); +vuint16m2_t __riscv_vandn_vv_u16m2_tu (vuint16m2_t vd, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl); +vuint16m2_t __riscv_vandn_vx_u16m2_tu (vuint16m2_t vd, vuint16m2_t vs2, uint16_t rs1, size_t vl); +vuint16m4_t __riscv_vandn_vv_u16m4_tu (vuint16m4_t vd, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl); +vuint16m4_t __riscv_vandn_vx_u16m4_tu (vuint16m4_t vd, vuint16m4_t vs2, uint16_t rs1, size_t vl); +vuint16m8_t __riscv_vandn_vv_u16m8_tu (vuint16m8_t vd, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl); +vuint16m8_t __riscv_vandn_vx_u16m8_tu (vuint16m8_t vd, vuint16m8_t vs2, uint16_t rs1, size_t vl); +vuint32mf2_t __riscv_vandn_vv_u32mf2_tu (vuint32mf2_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl); +vuint32mf2_t __riscv_vandn_vx_u32mf2_tu (vuint32mf2_t vd, vuint32mf2_t vs2, uint32_t rs1, size_t vl); +vuint32m1_t __riscv_vandn_vv_u32m1_tu (vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl); +vuint32m1_t __riscv_vandn_vx_u32m1_tu (vuint32m1_t vd, vuint32m1_t vs2, uint32_t rs1, size_t vl); +vuint32m2_t __riscv_vandn_vv_u32m2_tu (vuint32m2_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl); +vuint32m2_t __riscv_vandn_vx_u32m2_tu (vuint32m2_t vd, vuint32m2_t vs2, uint32_t rs1, size_t vl); +vuint32m4_t __riscv_vandn_vv_u32m4_tu (vuint32m4_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl); +vuint32m4_t __riscv_vandn_vx_u32m4_tu (vuint32m4_t vd, vuint32m4_t vs2, uint32_t rs1, size_t vl); +vuint32m8_t __riscv_vandn_vv_u32m8_tu (vuint32m8_t vd, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl); +vuint32m8_t __riscv_vandn_vx_u32m8_tu (vuint32m8_t vd, vuint32m8_t vs2, uint32_t rs1, size_t vl); +vuint64m1_t __riscv_vandn_vv_u64m1_tu (vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl); +vuint64m1_t __riscv_vandn_vx_u64m1_tu (vuint64m1_t vd, vuint64m1_t vs2, uint64_t rs1, size_t vl); +vuint64m2_t __riscv_vandn_vv_u64m2_tu (vuint64m2_t vd, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl); +vuint64m2_t __riscv_vandn_vx_u64m2_tu (vuint64m2_t vd, vuint64m2_t vs2, uint64_t rs1, size_t vl); +vuint64m4_t __riscv_vandn_vv_u64m4_tu (vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl); +vuint64m4_t __riscv_vandn_vx_u64m4_tu (vuint64m4_t vd, vuint64m4_t vs2, uint64_t rs1, size_t vl); +vuint64m8_t 
__riscv_vandn_vv_u64m8_tu (vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl); +vuint64m8_t __riscv_vandn_vx_u64m8_tu (vuint64m8_t vd, vuint64m8_t vs2, uint64_t rs1, size_t vl); +// masked functions +vuint8mf8_t __riscv_vandn_vv_u8mf8_tum (vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl); +vuint8mf8_t __riscv_vandn_vx_u8mf8_tum (vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, uint8_t rs1, size_t vl); +vuint8mf4_t __riscv_vandn_vv_u8mf4_tum (vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl); +vuint8mf4_t __riscv_vandn_vx_u8mf4_tum (vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, uint8_t rs1, size_t vl); +vuint8mf2_t __riscv_vandn_vv_u8mf2_tum (vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl); +vuint8mf2_t __riscv_vandn_vx_u8mf2_tum (vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, uint8_t rs1, size_t vl); +vuint8m1_t __riscv_vandn_vv_u8m1_tum (vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl); +vuint8m1_t __riscv_vandn_vx_u8m1_tum (vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, uint8_t rs1, size_t vl); +vuint8m2_t __riscv_vandn_vv_u8m2_tum (vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl); +vuint8m2_t __riscv_vandn_vx_u8m2_tum (vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, uint8_t rs1, size_t vl); +vuint8m4_t __riscv_vandn_vv_u8m4_tum (vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl); +vuint8m4_t __riscv_vandn_vx_u8m4_tum (vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, uint8_t rs1, size_t vl); +vuint8m8_t __riscv_vandn_vv_u8m8_tum (vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl); +vuint8m8_t __riscv_vandn_vx_u8m8_tum (vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, uint8_t rs1, size_t vl); +vuint16mf4_t __riscv_vandn_vv_u16mf4_tum (vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl); +vuint16mf4_t __riscv_vandn_vx_u16mf4_tum (vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, uint16_t rs1, size_t vl); +vuint16mf2_t __riscv_vandn_vv_u16mf2_tum (vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl); +vuint16mf2_t __riscv_vandn_vx_u16mf2_tum (vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, uint16_t rs1, size_t vl); +vuint16m1_t __riscv_vandn_vv_u16m1_tum (vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl); +vuint16m1_t __riscv_vandn_vx_u16m1_tum (vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, uint16_t rs1, size_t vl); +vuint16m2_t __riscv_vandn_vv_u16m2_tum (vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl); +vuint16m2_t __riscv_vandn_vx_u16m2_tum (vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, uint16_t rs1, size_t vl); +vuint16m4_t __riscv_vandn_vv_u16m4_tum (vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl); +vuint16m4_t __riscv_vandn_vx_u16m4_tum (vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, uint16_t rs1, size_t vl); +vuint16m8_t __riscv_vandn_vv_u16m8_tum (vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl); +vuint16m8_t __riscv_vandn_vx_u16m8_tum (vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, uint16_t rs1, size_t vl); +vuint32mf2_t __riscv_vandn_vv_u32mf2_tum (vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl); +vuint32mf2_t __riscv_vandn_vx_u32mf2_tum (vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, uint32_t rs1, size_t vl); +vuint32m1_t __riscv_vandn_vv_u32m1_tum (vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl); +vuint32m1_t 
__riscv_vandn_vx_u32m1_tum (vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, uint32_t rs1, size_t vl); +vuint32m2_t __riscv_vandn_vv_u32m2_tum (vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl); +vuint32m2_t __riscv_vandn_vx_u32m2_tum (vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, uint32_t rs1, size_t vl); +vuint32m4_t __riscv_vandn_vv_u32m4_tum (vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl); +vuint32m4_t __riscv_vandn_vx_u32m4_tum (vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, uint32_t rs1, size_t vl); +vuint32m8_t __riscv_vandn_vv_u32m8_tum (vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl); +vuint32m8_t __riscv_vandn_vx_u32m8_tum (vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, uint32_t rs1, size_t vl); +vuint64m1_t __riscv_vandn_vv_u64m1_tum (vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl); +vuint64m1_t __riscv_vandn_vx_u64m1_tum (vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, uint64_t rs1, size_t vl); +vuint64m2_t __riscv_vandn_vv_u64m2_tum (vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl); +vuint64m2_t __riscv_vandn_vx_u64m2_tum (vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, uint64_t rs1, size_t vl); +vuint64m4_t __riscv_vandn_vv_u64m4_tum (vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl); +vuint64m4_t __riscv_vandn_vx_u64m4_tum (vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, uint64_t rs1, size_t vl); +vuint64m8_t __riscv_vandn_vv_u64m8_tum (vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl); +vuint64m8_t __riscv_vandn_vx_u64m8_tum (vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, uint64_t rs1, size_t vl); +// masked functions +vuint8mf8_t __riscv_vandn_vv_u8mf8_tumu (vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl); +vuint8mf8_t __riscv_vandn_vx_u8mf8_tumu (vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, uint8_t rs1, size_t vl); +vuint8mf4_t __riscv_vandn_vv_u8mf4_tumu (vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl); +vuint8mf4_t __riscv_vandn_vx_u8mf4_tumu (vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, uint8_t rs1, size_t vl); +vuint8mf2_t __riscv_vandn_vv_u8mf2_tumu (vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl); +vuint8mf2_t __riscv_vandn_vx_u8mf2_tumu (vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, uint8_t rs1, size_t vl); +vuint8m1_t __riscv_vandn_vv_u8m1_tumu (vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl); +vuint8m1_t __riscv_vandn_vx_u8m1_tumu (vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, uint8_t rs1, size_t vl); +vuint8m2_t __riscv_vandn_vv_u8m2_tumu (vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl); +vuint8m2_t __riscv_vandn_vx_u8m2_tumu (vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, uint8_t rs1, size_t vl); +vuint8m4_t __riscv_vandn_vv_u8m4_tumu (vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl); +vuint8m4_t __riscv_vandn_vx_u8m4_tumu (vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, uint8_t rs1, size_t vl); +vuint8m8_t __riscv_vandn_vv_u8m8_tumu (vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl); +vuint8m8_t __riscv_vandn_vx_u8m8_tumu (vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, uint8_t rs1, size_t vl); +vuint16mf4_t __riscv_vandn_vv_u16mf4_tumu (vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl); +vuint16mf4_t __riscv_vandn_vx_u16mf4_tumu (vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, uint16_t rs1, size_t vl); 
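+// Editorial sketch, not part of the generated listing: the policy suffixes follow
+// the standard RVV intrinsic naming (_tu = tail undisturbed; _tum, _tumu, _mu add
+// a mask operand vm, with _tumu keeping both inactive and tail elements from vd).
+// Assuming vm, vd, vs2 and vl are already set up, the u16mf4 _tumu prototype
+// declared just above might be called as:
+//   vuint16mf4_t r = __riscv_vandn_vx_u16mf4_tumu(vm, vd, vs2, 0x0f, vl);
+// which computes vs2 & ~0x0f (clearing the low four bits) in the active elements
+// and leaves inactive and tail elements equal to vd.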
+vuint16mf2_t __riscv_vandn_vv_u16mf2_tumu (vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl); +vuint16mf2_t __riscv_vandn_vx_u16mf2_tumu (vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, uint16_t rs1, size_t vl); +vuint16m1_t __riscv_vandn_vv_u16m1_tumu (vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl); +vuint16m1_t __riscv_vandn_vx_u16m1_tumu (vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, uint16_t rs1, size_t vl); +vuint16m2_t __riscv_vandn_vv_u16m2_tumu (vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl); +vuint16m2_t __riscv_vandn_vx_u16m2_tumu (vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, uint16_t rs1, size_t vl); +vuint16m4_t __riscv_vandn_vv_u16m4_tumu (vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl); +vuint16m4_t __riscv_vandn_vx_u16m4_tumu (vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, uint16_t rs1, size_t vl); +vuint16m8_t __riscv_vandn_vv_u16m8_tumu (vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl); +vuint16m8_t __riscv_vandn_vx_u16m8_tumu (vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, uint16_t rs1, size_t vl); +vuint32mf2_t __riscv_vandn_vv_u32mf2_tumu (vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl); +vuint32mf2_t __riscv_vandn_vx_u32mf2_tumu (vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, uint32_t rs1, size_t vl); +vuint32m1_t __riscv_vandn_vv_u32m1_tumu (vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl); +vuint32m1_t __riscv_vandn_vx_u32m1_tumu (vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, uint32_t rs1, size_t vl); +vuint32m2_t __riscv_vandn_vv_u32m2_tumu (vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl); +vuint32m2_t __riscv_vandn_vx_u32m2_tumu (vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, uint32_t rs1, size_t vl); +vuint32m4_t __riscv_vandn_vv_u32m4_tumu (vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl); +vuint32m4_t __riscv_vandn_vx_u32m4_tumu (vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, uint32_t rs1, size_t vl); +vuint32m8_t __riscv_vandn_vv_u32m8_tumu (vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl); +vuint32m8_t __riscv_vandn_vx_u32m8_tumu (vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, uint32_t rs1, size_t vl); +vuint64m1_t __riscv_vandn_vv_u64m1_tumu (vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl); +vuint64m1_t __riscv_vandn_vx_u64m1_tumu (vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, uint64_t rs1, size_t vl); +vuint64m2_t __riscv_vandn_vv_u64m2_tumu (vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl); +vuint64m2_t __riscv_vandn_vx_u64m2_tumu (vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, uint64_t rs1, size_t vl); +vuint64m4_t __riscv_vandn_vv_u64m4_tumu (vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl); +vuint64m4_t __riscv_vandn_vx_u64m4_tumu (vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, uint64_t rs1, size_t vl); +vuint64m8_t __riscv_vandn_vv_u64m8_tumu (vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl); +vuint64m8_t __riscv_vandn_vx_u64m8_tumu (vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, uint64_t rs1, size_t vl); +// masked functions +vuint8mf8_t __riscv_vandn_vv_u8mf8_mu (vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl); +vuint8mf8_t __riscv_vandn_vx_u8mf8_mu (vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, uint8_t rs1, size_t vl); +vuint8mf4_t 
__riscv_vandn_vv_u8mf4_mu (vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl); +vuint8mf4_t __riscv_vandn_vx_u8mf4_mu (vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, uint8_t rs1, size_t vl); +vuint8mf2_t __riscv_vandn_vv_u8mf2_mu (vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl); +vuint8mf2_t __riscv_vandn_vx_u8mf2_mu (vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, uint8_t rs1, size_t vl); +vuint8m1_t __riscv_vandn_vv_u8m1_mu (vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl); +vuint8m1_t __riscv_vandn_vx_u8m1_mu (vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, uint8_t rs1, size_t vl); +vuint8m2_t __riscv_vandn_vv_u8m2_mu (vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl); +vuint8m2_t __riscv_vandn_vx_u8m2_mu (vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, uint8_t rs1, size_t vl); +vuint8m4_t __riscv_vandn_vv_u8m4_mu (vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl); +vuint8m4_t __riscv_vandn_vx_u8m4_mu (vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, uint8_t rs1, size_t vl); +vuint8m8_t __riscv_vandn_vv_u8m8_mu (vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl); +vuint8m8_t __riscv_vandn_vx_u8m8_mu (vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, uint8_t rs1, size_t vl); +vuint16mf4_t __riscv_vandn_vv_u16mf4_mu (vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl); +vuint16mf4_t __riscv_vandn_vx_u16mf4_mu (vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, uint16_t rs1, size_t vl); +vuint16mf2_t __riscv_vandn_vv_u16mf2_mu (vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl); +vuint16mf2_t __riscv_vandn_vx_u16mf2_mu (vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, uint16_t rs1, size_t vl); +vuint16m1_t __riscv_vandn_vv_u16m1_mu (vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl); +vuint16m1_t __riscv_vandn_vx_u16m1_mu (vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, uint16_t rs1, size_t vl); +vuint16m2_t __riscv_vandn_vv_u16m2_mu (vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl); +vuint16m2_t __riscv_vandn_vx_u16m2_mu (vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, uint16_t rs1, size_t vl); +vuint16m4_t __riscv_vandn_vv_u16m4_mu (vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl); +vuint16m4_t __riscv_vandn_vx_u16m4_mu (vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, uint16_t rs1, size_t vl); +vuint16m8_t __riscv_vandn_vv_u16m8_mu (vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl); +vuint16m8_t __riscv_vandn_vx_u16m8_mu (vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, uint16_t rs1, size_t vl); +vuint32mf2_t __riscv_vandn_vv_u32mf2_mu (vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl); +vuint32mf2_t __riscv_vandn_vx_u32mf2_mu (vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, uint32_t rs1, size_t vl); +vuint32m1_t __riscv_vandn_vv_u32m1_mu (vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl); +vuint32m1_t __riscv_vandn_vx_u32m1_mu (vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, uint32_t rs1, size_t vl); +vuint32m2_t __riscv_vandn_vv_u32m2_mu (vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl); +vuint32m2_t __riscv_vandn_vx_u32m2_mu (vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, uint32_t rs1, size_t vl); +vuint32m4_t __riscv_vandn_vv_u32m4_mu (vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl); +vuint32m4_t __riscv_vandn_vx_u32m4_mu 
(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, uint32_t rs1, size_t vl); +vuint32m8_t __riscv_vandn_vv_u32m8_mu (vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl); +vuint32m8_t __riscv_vandn_vx_u32m8_mu (vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, uint32_t rs1, size_t vl); +vuint64m1_t __riscv_vandn_vv_u64m1_mu (vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl); +vuint64m1_t __riscv_vandn_vx_u64m1_mu (vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, uint64_t rs1, size_t vl); +vuint64m2_t __riscv_vandn_vv_u64m2_mu (vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl); +vuint64m2_t __riscv_vandn_vx_u64m2_mu (vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, uint64_t rs1, size_t vl); +vuint64m4_t __riscv_vandn_vv_u64m4_mu (vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl); +vuint64m4_t __riscv_vandn_vx_u64m4_mu (vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, uint64_t rs1, size_t vl); +vuint64m8_t __riscv_vandn_vv_u64m8_mu (vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl); +vuint64m8_t __riscv_vandn_vx_u64m8_mu (vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, uint64_t rs1, size_t vl); +---- + +[[policy-variant-]] +==== Vector Basic Bit-manipulation - Reverse + +[,c] +---- +vuint8mf8_t __riscv_vbrev_v_u8mf8_tu (vuint8mf8_t vd, vuint8mf8_t vs2, size_t vl); +vuint8mf4_t __riscv_vbrev_v_u8mf4_tu (vuint8mf4_t vd, vuint8mf4_t vs2, size_t vl); +vuint8mf2_t __riscv_vbrev_v_u8mf2_tu (vuint8mf2_t vd, vuint8mf2_t vs2, size_t vl); +vuint8m1_t __riscv_vbrev_v_u8m1_tu (vuint8m1_t vd, vuint8m1_t vs2, size_t vl); +vuint8m2_t __riscv_vbrev_v_u8m2_tu (vuint8m2_t vd, vuint8m2_t vs2, size_t vl); +vuint8m4_t __riscv_vbrev_v_u8m4_tu (vuint8m4_t vd, vuint8m4_t vs2, size_t vl); +vuint8m8_t __riscv_vbrev_v_u8m8_tu (vuint8m8_t vd, vuint8m8_t vs2, size_t vl); +vuint16mf4_t __riscv_vbrev_v_u16mf4_tu (vuint16mf4_t vd, vuint16mf4_t vs2, size_t vl); +vuint16mf2_t __riscv_vbrev_v_u16mf2_tu (vuint16mf2_t vd, vuint16mf2_t vs2, size_t vl); +vuint16m1_t __riscv_vbrev_v_u16m1_tu (vuint16m1_t vd, vuint16m1_t vs2, size_t vl); +vuint16m2_t __riscv_vbrev_v_u16m2_tu (vuint16m2_t vd, vuint16m2_t vs2, size_t vl); +vuint16m4_t __riscv_vbrev_v_u16m4_tu (vuint16m4_t vd, vuint16m4_t vs2, size_t vl); +vuint16m8_t __riscv_vbrev_v_u16m8_tu (vuint16m8_t vd, vuint16m8_t vs2, size_t vl); +vuint32mf2_t __riscv_vbrev_v_u32mf2_tu (vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vbrev_v_u32m1_tu (vuint32m1_t vd, vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vbrev_v_u32m2_tu (vuint32m2_t vd, vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vbrev_v_u32m4_tu (vuint32m4_t vd, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vbrev_v_u32m8_tu (vuint32m8_t vd, vuint32m8_t vs2, size_t vl); +vuint64m1_t __riscv_vbrev_v_u64m1_tu (vuint64m1_t vd, vuint64m1_t vs2, size_t vl); +vuint64m2_t __riscv_vbrev_v_u64m2_tu (vuint64m2_t vd, vuint64m2_t vs2, size_t vl); +vuint64m4_t __riscv_vbrev_v_u64m4_tu (vuint64m4_t vd, vuint64m4_t vs2, size_t vl); +vuint64m8_t __riscv_vbrev_v_u64m8_tu (vuint64m8_t vd, vuint64m8_t vs2, size_t vl); +vuint8mf8_t __riscv_vbrev8_v_u8mf8_tu (vuint8mf8_t vd, vuint8mf8_t vs2, size_t vl); +vuint8mf4_t __riscv_vbrev8_v_u8mf4_tu (vuint8mf4_t vd, vuint8mf4_t vs2, size_t vl); +vuint8mf2_t __riscv_vbrev8_v_u8mf2_tu (vuint8mf2_t vd, vuint8mf2_t vs2, size_t vl); +vuint8m1_t __riscv_vbrev8_v_u8m1_tu (vuint8m1_t vd, vuint8m1_t vs2, size_t vl); +vuint8m2_t __riscv_vbrev8_v_u8m2_tu (vuint8m2_t vd, vuint8m2_t vs2, 
size_t vl); +vuint8m4_t __riscv_vbrev8_v_u8m4_tu (vuint8m4_t vd, vuint8m4_t vs2, size_t vl); +vuint8m8_t __riscv_vbrev8_v_u8m8_tu (vuint8m8_t vd, vuint8m8_t vs2, size_t vl); +vuint16mf4_t __riscv_vbrev8_v_u16mf4_tu (vuint16mf4_t vd, vuint16mf4_t vs2, size_t vl); +vuint16mf2_t __riscv_vbrev8_v_u16mf2_tu (vuint16mf2_t vd, vuint16mf2_t vs2, size_t vl); +vuint16m1_t __riscv_vbrev8_v_u16m1_tu (vuint16m1_t vd, vuint16m1_t vs2, size_t vl); +vuint16m2_t __riscv_vbrev8_v_u16m2_tu (vuint16m2_t vd, vuint16m2_t vs2, size_t vl); +vuint16m4_t __riscv_vbrev8_v_u16m4_tu (vuint16m4_t vd, vuint16m4_t vs2, size_t vl); +vuint16m8_t __riscv_vbrev8_v_u16m8_tu (vuint16m8_t vd, vuint16m8_t vs2, size_t vl); +vuint32mf2_t __riscv_vbrev8_v_u32mf2_tu (vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vbrev8_v_u32m1_tu (vuint32m1_t vd, vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vbrev8_v_u32m2_tu (vuint32m2_t vd, vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vbrev8_v_u32m4_tu (vuint32m4_t vd, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vbrev8_v_u32m8_tu (vuint32m8_t vd, vuint32m8_t vs2, size_t vl); +vuint64m1_t __riscv_vbrev8_v_u64m1_tu (vuint64m1_t vd, vuint64m1_t vs2, size_t vl); +vuint64m2_t __riscv_vbrev8_v_u64m2_tu (vuint64m2_t vd, vuint64m2_t vs2, size_t vl); +vuint64m4_t __riscv_vbrev8_v_u64m4_tu (vuint64m4_t vd, vuint64m4_t vs2, size_t vl); +vuint64m8_t __riscv_vbrev8_v_u64m8_tu (vuint64m8_t vd, vuint64m8_t vs2, size_t vl); +vuint8mf8_t __riscv_vrev8_v_u8mf8_tu (vuint8mf8_t vd, vuint8mf8_t vs2, size_t vl); +vuint8mf4_t __riscv_vrev8_v_u8mf4_tu (vuint8mf4_t vd, vuint8mf4_t vs2, size_t vl); +vuint8mf2_t __riscv_vrev8_v_u8mf2_tu (vuint8mf2_t vd, vuint8mf2_t vs2, size_t vl); +vuint8m1_t __riscv_vrev8_v_u8m1_tu (vuint8m1_t vd, vuint8m1_t vs2, size_t vl); +vuint8m2_t __riscv_vrev8_v_u8m2_tu (vuint8m2_t vd, vuint8m2_t vs2, size_t vl); +vuint8m4_t __riscv_vrev8_v_u8m4_tu (vuint8m4_t vd, vuint8m4_t vs2, size_t vl); +vuint8m8_t __riscv_vrev8_v_u8m8_tu (vuint8m8_t vd, vuint8m8_t vs2, size_t vl); +vuint16mf4_t __riscv_vrev8_v_u16mf4_tu (vuint16mf4_t vd, vuint16mf4_t vs2, size_t vl); +vuint16mf2_t __riscv_vrev8_v_u16mf2_tu (vuint16mf2_t vd, vuint16mf2_t vs2, size_t vl); +vuint16m1_t __riscv_vrev8_v_u16m1_tu (vuint16m1_t vd, vuint16m1_t vs2, size_t vl); +vuint16m2_t __riscv_vrev8_v_u16m2_tu (vuint16m2_t vd, vuint16m2_t vs2, size_t vl); +vuint16m4_t __riscv_vrev8_v_u16m4_tu (vuint16m4_t vd, vuint16m4_t vs2, size_t vl); +vuint16m8_t __riscv_vrev8_v_u16m8_tu (vuint16m8_t vd, vuint16m8_t vs2, size_t vl); +vuint32mf2_t __riscv_vrev8_v_u32mf2_tu (vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vrev8_v_u32m1_tu (vuint32m1_t vd, vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vrev8_v_u32m2_tu (vuint32m2_t vd, vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vrev8_v_u32m4_tu (vuint32m4_t vd, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vrev8_v_u32m8_tu (vuint32m8_t vd, vuint32m8_t vs2, size_t vl); +vuint64m1_t __riscv_vrev8_v_u64m1_tu (vuint64m1_t vd, vuint64m1_t vs2, size_t vl); +vuint64m2_t __riscv_vrev8_v_u64m2_tu (vuint64m2_t vd, vuint64m2_t vs2, size_t vl); +vuint64m4_t __riscv_vrev8_v_u64m4_tu (vuint64m4_t vd, vuint64m4_t vs2, size_t vl); +vuint64m8_t __riscv_vrev8_v_u64m8_tu (vuint64m8_t vd, vuint64m8_t vs2, size_t vl); +// masked functions +vuint8mf8_t __riscv_vbrev_v_u8mf8_tum (vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, size_t vl); +vuint8mf4_t __riscv_vbrev_v_u8mf4_tum (vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, size_t vl); +vuint8mf2_t 
__riscv_vbrev_v_u8mf2_tum (vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, size_t vl); +vuint8m1_t __riscv_vbrev_v_u8m1_tum (vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, size_t vl); +vuint8m2_t __riscv_vbrev_v_u8m2_tum (vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, size_t vl); +vuint8m4_t __riscv_vbrev_v_u8m4_tum (vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, size_t vl); +vuint8m8_t __riscv_vbrev_v_u8m8_tum (vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, size_t vl); +vuint16mf4_t __riscv_vbrev_v_u16mf4_tum (vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, size_t vl); +vuint16mf2_t __riscv_vbrev_v_u16mf2_tum (vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, size_t vl); +vuint16m1_t __riscv_vbrev_v_u16m1_tum (vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, size_t vl); +vuint16m2_t __riscv_vbrev_v_u16m2_tum (vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, size_t vl); +vuint16m4_t __riscv_vbrev_v_u16m4_tum (vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, size_t vl); +vuint16m8_t __riscv_vbrev_v_u16m8_tum (vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, size_t vl); +vuint32mf2_t __riscv_vbrev_v_u32mf2_tum (vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vbrev_v_u32m1_tum (vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vbrev_v_u32m2_tum (vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vbrev_v_u32m4_tum (vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vbrev_v_u32m8_tum (vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, size_t vl); +vuint64m1_t __riscv_vbrev_v_u64m1_tum (vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, size_t vl); +vuint64m2_t __riscv_vbrev_v_u64m2_tum (vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, size_t vl); +vuint64m4_t __riscv_vbrev_v_u64m4_tum (vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, size_t vl); +vuint64m8_t __riscv_vbrev_v_u64m8_tum (vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, size_t vl); +vuint8mf8_t __riscv_vbrev8_v_u8mf8_tum (vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, size_t vl); +vuint8mf4_t __riscv_vbrev8_v_u8mf4_tum (vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, size_t vl); +vuint8mf2_t __riscv_vbrev8_v_u8mf2_tum (vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, size_t vl); +vuint8m1_t __riscv_vbrev8_v_u8m1_tum (vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, size_t vl); +vuint8m2_t __riscv_vbrev8_v_u8m2_tum (vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, size_t vl); +vuint8m4_t __riscv_vbrev8_v_u8m4_tum (vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, size_t vl); +vuint8m8_t __riscv_vbrev8_v_u8m8_tum (vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, size_t vl); +vuint16mf4_t __riscv_vbrev8_v_u16mf4_tum (vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, size_t vl); +vuint16mf2_t __riscv_vbrev8_v_u16mf2_tum (vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, size_t vl); +vuint16m1_t __riscv_vbrev8_v_u16m1_tum (vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, size_t vl); +vuint16m2_t __riscv_vbrev8_v_u16m2_tum (vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, size_t vl); +vuint16m4_t __riscv_vbrev8_v_u16m4_tum (vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, size_t vl); +vuint16m8_t __riscv_vbrev8_v_u16m8_tum (vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, size_t vl); +vuint32mf2_t __riscv_vbrev8_v_u32mf2_tum (vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vbrev8_v_u32m1_tum (vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vbrev8_v_u32m2_tum (vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, size_t vl); +vuint32m4_t 
__riscv_vbrev8_v_u32m4_tum (vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vbrev8_v_u32m8_tum (vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, size_t vl); +vuint64m1_t __riscv_vbrev8_v_u64m1_tum (vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, size_t vl); +vuint64m2_t __riscv_vbrev8_v_u64m2_tum (vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, size_t vl); +vuint64m4_t __riscv_vbrev8_v_u64m4_tum (vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, size_t vl); +vuint64m8_t __riscv_vbrev8_v_u64m8_tum (vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, size_t vl); +vuint8mf8_t __riscv_vrev8_v_u8mf8_tum (vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, size_t vl); +vuint8mf4_t __riscv_vrev8_v_u8mf4_tum (vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, size_t vl); +vuint8mf2_t __riscv_vrev8_v_u8mf2_tum (vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, size_t vl); +vuint8m1_t __riscv_vrev8_v_u8m1_tum (vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, size_t vl); +vuint8m2_t __riscv_vrev8_v_u8m2_tum (vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, size_t vl); +vuint8m4_t __riscv_vrev8_v_u8m4_tum (vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, size_t vl); +vuint8m8_t __riscv_vrev8_v_u8m8_tum (vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, size_t vl); +vuint16mf4_t __riscv_vrev8_v_u16mf4_tum (vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, size_t vl); +vuint16mf2_t __riscv_vrev8_v_u16mf2_tum (vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, size_t vl); +vuint16m1_t __riscv_vrev8_v_u16m1_tum (vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, size_t vl); +vuint16m2_t __riscv_vrev8_v_u16m2_tum (vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, size_t vl); +vuint16m4_t __riscv_vrev8_v_u16m4_tum (vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, size_t vl); +vuint16m8_t __riscv_vrev8_v_u16m8_tum (vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, size_t vl); +vuint32mf2_t __riscv_vrev8_v_u32mf2_tum (vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vrev8_v_u32m1_tum (vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vrev8_v_u32m2_tum (vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vrev8_v_u32m4_tum (vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vrev8_v_u32m8_tum (vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, size_t vl); +vuint64m1_t __riscv_vrev8_v_u64m1_tum (vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, size_t vl); +vuint64m2_t __riscv_vrev8_v_u64m2_tum (vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, size_t vl); +vuint64m4_t __riscv_vrev8_v_u64m4_tum (vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, size_t vl); +vuint64m8_t __riscv_vrev8_v_u64m8_tum (vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, size_t vl); +// masked functions +vuint8mf8_t __riscv_vbrev_v_u8mf8_tumu (vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, size_t vl); +vuint8mf4_t __riscv_vbrev_v_u8mf4_tumu (vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, size_t vl); +vuint8mf2_t __riscv_vbrev_v_u8mf2_tumu (vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, size_t vl); +vuint8m1_t __riscv_vbrev_v_u8m1_tumu (vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, size_t vl); +vuint8m2_t __riscv_vbrev_v_u8m2_tumu (vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, size_t vl); +vuint8m4_t __riscv_vbrev_v_u8m4_tumu (vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, size_t vl); +vuint8m8_t __riscv_vbrev_v_u8m8_tumu (vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, size_t vl); +vuint16mf4_t __riscv_vbrev_v_u16mf4_tumu (vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, size_t vl); 
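+// Editorial sketch, not part of the generated listing: vbrev reverses the bit
+// order within each element, vbrev8 reverses the bit order within each byte, and
+// vrev8 reverses the byte order within each element (a byte swap). With an
+// assumed mask vm, destination vd, source vs2 and length vl, the u16mf4 _tumu
+// prototype declared just above would be used as, e.g.:
+//   vuint16mf4_t r = __riscv_vbrev_v_u16mf4_tumu(vm, vd, vs2, vl);
+// where inactive and tail elements keep their previous value from vd.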
+vuint16mf2_t __riscv_vbrev_v_u16mf2_tumu (vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, size_t vl); +vuint16m1_t __riscv_vbrev_v_u16m1_tumu (vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, size_t vl); +vuint16m2_t __riscv_vbrev_v_u16m2_tumu (vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, size_t vl); +vuint16m4_t __riscv_vbrev_v_u16m4_tumu (vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, size_t vl); +vuint16m8_t __riscv_vbrev_v_u16m8_tumu (vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, size_t vl); +vuint32mf2_t __riscv_vbrev_v_u32mf2_tumu (vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vbrev_v_u32m1_tumu (vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vbrev_v_u32m2_tumu (vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vbrev_v_u32m4_tumu (vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vbrev_v_u32m8_tumu (vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, size_t vl); +vuint64m1_t __riscv_vbrev_v_u64m1_tumu (vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, size_t vl); +vuint64m2_t __riscv_vbrev_v_u64m2_tumu (vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, size_t vl); +vuint64m4_t __riscv_vbrev_v_u64m4_tumu (vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, size_t vl); +vuint64m8_t __riscv_vbrev_v_u64m8_tumu (vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, size_t vl); +vuint8mf8_t __riscv_vbrev8_v_u8mf8_tumu (vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, size_t vl); +vuint8mf4_t __riscv_vbrev8_v_u8mf4_tumu (vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, size_t vl); +vuint8mf2_t __riscv_vbrev8_v_u8mf2_tumu (vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, size_t vl); +vuint8m1_t __riscv_vbrev8_v_u8m1_tumu (vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, size_t vl); +vuint8m2_t __riscv_vbrev8_v_u8m2_tumu (vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, size_t vl); +vuint8m4_t __riscv_vbrev8_v_u8m4_tumu (vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, size_t vl); +vuint8m8_t __riscv_vbrev8_v_u8m8_tumu (vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, size_t vl); +vuint16mf4_t __riscv_vbrev8_v_u16mf4_tumu (vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, size_t vl); +vuint16mf2_t __riscv_vbrev8_v_u16mf2_tumu (vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, size_t vl); +vuint16m1_t __riscv_vbrev8_v_u16m1_tumu (vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, size_t vl); +vuint16m2_t __riscv_vbrev8_v_u16m2_tumu (vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, size_t vl); +vuint16m4_t __riscv_vbrev8_v_u16m4_tumu (vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, size_t vl); +vuint16m8_t __riscv_vbrev8_v_u16m8_tumu (vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, size_t vl); +vuint32mf2_t __riscv_vbrev8_v_u32mf2_tumu (vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vbrev8_v_u32m1_tumu (vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vbrev8_v_u32m2_tumu (vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vbrev8_v_u32m4_tumu (vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vbrev8_v_u32m8_tumu (vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, size_t vl); +vuint64m1_t __riscv_vbrev8_v_u64m1_tumu (vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, size_t vl); +vuint64m2_t __riscv_vbrev8_v_u64m2_tumu (vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, size_t vl); +vuint64m4_t __riscv_vbrev8_v_u64m4_tumu (vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, size_t vl); +vuint64m8_t __riscv_vbrev8_v_u64m8_tumu 
(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, size_t vl); +vuint8mf8_t __riscv_vrev8_v_u8mf8_tumu (vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, size_t vl); +vuint8mf4_t __riscv_vrev8_v_u8mf4_tumu (vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, size_t vl); +vuint8mf2_t __riscv_vrev8_v_u8mf2_tumu (vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, size_t vl); +vuint8m1_t __riscv_vrev8_v_u8m1_tumu (vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, size_t vl); +vuint8m2_t __riscv_vrev8_v_u8m2_tumu (vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, size_t vl); +vuint8m4_t __riscv_vrev8_v_u8m4_tumu (vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, size_t vl); +vuint8m8_t __riscv_vrev8_v_u8m8_tumu (vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, size_t vl); +vuint16mf4_t __riscv_vrev8_v_u16mf4_tumu (vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, size_t vl); +vuint16mf2_t __riscv_vrev8_v_u16mf2_tumu (vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, size_t vl); +vuint16m1_t __riscv_vrev8_v_u16m1_tumu (vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, size_t vl); +vuint16m2_t __riscv_vrev8_v_u16m2_tumu (vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, size_t vl); +vuint16m4_t __riscv_vrev8_v_u16m4_tumu (vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, size_t vl); +vuint16m8_t __riscv_vrev8_v_u16m8_tumu (vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, size_t vl); +vuint32mf2_t __riscv_vrev8_v_u32mf2_tumu (vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vrev8_v_u32m1_tumu (vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vrev8_v_u32m2_tumu (vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vrev8_v_u32m4_tumu (vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vrev8_v_u32m8_tumu (vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, size_t vl); +vuint64m1_t __riscv_vrev8_v_u64m1_tumu (vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, size_t vl); +vuint64m2_t __riscv_vrev8_v_u64m2_tumu (vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, size_t vl); +vuint64m4_t __riscv_vrev8_v_u64m4_tumu (vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, size_t vl); +vuint64m8_t __riscv_vrev8_v_u64m8_tumu (vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, size_t vl); +// masked functions +vuint8mf8_t __riscv_vbrev_v_u8mf8_mu (vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, size_t vl); +vuint8mf4_t __riscv_vbrev_v_u8mf4_mu (vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, size_t vl); +vuint8mf2_t __riscv_vbrev_v_u8mf2_mu (vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, size_t vl); +vuint8m1_t __riscv_vbrev_v_u8m1_mu (vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, size_t vl); +vuint8m2_t __riscv_vbrev_v_u8m2_mu (vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, size_t vl); +vuint8m4_t __riscv_vbrev_v_u8m4_mu (vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, size_t vl); +vuint8m8_t __riscv_vbrev_v_u8m8_mu (vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, size_t vl); +vuint16mf4_t __riscv_vbrev_v_u16mf4_mu (vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, size_t vl); +vuint16mf2_t __riscv_vbrev_v_u16mf2_mu (vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, size_t vl); +vuint16m1_t __riscv_vbrev_v_u16m1_mu (vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, size_t vl); +vuint16m2_t __riscv_vbrev_v_u16m2_mu (vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, size_t vl); +vuint16m4_t __riscv_vbrev_v_u16m4_mu (vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, size_t vl); +vuint16m8_t __riscv_vbrev_v_u16m8_mu (vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, size_t vl); +vuint32mf2_t 
__riscv_vbrev_v_u32mf2_mu (vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vbrev_v_u32m1_mu (vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vbrev_v_u32m2_mu (vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vbrev_v_u32m4_mu (vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vbrev_v_u32m8_mu (vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, size_t vl); +vuint64m1_t __riscv_vbrev_v_u64m1_mu (vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, size_t vl); +vuint64m2_t __riscv_vbrev_v_u64m2_mu (vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, size_t vl); +vuint64m4_t __riscv_vbrev_v_u64m4_mu (vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, size_t vl); +vuint64m8_t __riscv_vbrev_v_u64m8_mu (vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, size_t vl); +vuint8mf8_t __riscv_vbrev8_v_u8mf8_mu (vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, size_t vl); +vuint8mf4_t __riscv_vbrev8_v_u8mf4_mu (vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, size_t vl); +vuint8mf2_t __riscv_vbrev8_v_u8mf2_mu (vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, size_t vl); +vuint8m1_t __riscv_vbrev8_v_u8m1_mu (vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, size_t vl); +vuint8m2_t __riscv_vbrev8_v_u8m2_mu (vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, size_t vl); +vuint8m4_t __riscv_vbrev8_v_u8m4_mu (vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, size_t vl); +vuint8m8_t __riscv_vbrev8_v_u8m8_mu (vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, size_t vl); +vuint16mf4_t __riscv_vbrev8_v_u16mf4_mu (vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, size_t vl); +vuint16mf2_t __riscv_vbrev8_v_u16mf2_mu (vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, size_t vl); +vuint16m1_t __riscv_vbrev8_v_u16m1_mu (vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, size_t vl); +vuint16m2_t __riscv_vbrev8_v_u16m2_mu (vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, size_t vl); +vuint16m4_t __riscv_vbrev8_v_u16m4_mu (vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, size_t vl); +vuint16m8_t __riscv_vbrev8_v_u16m8_mu (vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, size_t vl); +vuint32mf2_t __riscv_vbrev8_v_u32mf2_mu (vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vbrev8_v_u32m1_mu (vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vbrev8_v_u32m2_mu (vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vbrev8_v_u32m4_mu (vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vbrev8_v_u32m8_mu (vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, size_t vl); +vuint64m1_t __riscv_vbrev8_v_u64m1_mu (vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, size_t vl); +vuint64m2_t __riscv_vbrev8_v_u64m2_mu (vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, size_t vl); +vuint64m4_t __riscv_vbrev8_v_u64m4_mu (vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, size_t vl); +vuint64m8_t __riscv_vbrev8_v_u64m8_mu (vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, size_t vl); +vuint8mf8_t __riscv_vrev8_v_u8mf8_mu (vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, size_t vl); +vuint8mf4_t __riscv_vrev8_v_u8mf4_mu (vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, size_t vl); +vuint8mf2_t __riscv_vrev8_v_u8mf2_mu (vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, size_t vl); +vuint8m1_t __riscv_vrev8_v_u8m1_mu (vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, size_t vl); +vuint8m2_t __riscv_vrev8_v_u8m2_mu (vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, size_t vl); +vuint8m4_t __riscv_vrev8_v_u8m4_mu 
(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, size_t vl); +vuint8m8_t __riscv_vrev8_v_u8m8_mu (vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, size_t vl); +vuint16mf4_t __riscv_vrev8_v_u16mf4_mu (vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, size_t vl); +vuint16mf2_t __riscv_vrev8_v_u16mf2_mu (vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, size_t vl); +vuint16m1_t __riscv_vrev8_v_u16m1_mu (vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, size_t vl); +vuint16m2_t __riscv_vrev8_v_u16m2_mu (vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, size_t vl); +vuint16m4_t __riscv_vrev8_v_u16m4_mu (vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, size_t vl); +vuint16m8_t __riscv_vrev8_v_u16m8_mu (vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, size_t vl); +vuint32mf2_t __riscv_vrev8_v_u32mf2_mu (vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vrev8_v_u32m1_mu (vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vrev8_v_u32m2_mu (vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vrev8_v_u32m4_mu (vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vrev8_v_u32m8_mu (vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, size_t vl); +vuint64m1_t __riscv_vrev8_v_u64m1_mu (vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, size_t vl); +vuint64m2_t __riscv_vrev8_v_u64m2_mu (vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, size_t vl); +vuint64m4_t __riscv_vrev8_v_u64m4_mu (vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, size_t vl); +vuint64m8_t __riscv_vrev8_v_u64m8_mu (vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, size_t vl); +---- + +[[policy-variant-]] +==== Vector Basic Bit-manipulation - Count Bits + +[,c] +---- +vuint8mf8_t __riscv_vclz_v_u8mf8_tu (vuint8mf8_t vd, vuint8mf8_t vs2, size_t vl); +vuint8mf4_t __riscv_vclz_v_u8mf4_tu (vuint8mf4_t vd, vuint8mf4_t vs2, size_t vl); +vuint8mf2_t __riscv_vclz_v_u8mf2_tu (vuint8mf2_t vd, vuint8mf2_t vs2, size_t vl); +vuint8m1_t __riscv_vclz_v_u8m1_tu (vuint8m1_t vd, vuint8m1_t vs2, size_t vl); +vuint8m2_t __riscv_vclz_v_u8m2_tu (vuint8m2_t vd, vuint8m2_t vs2, size_t vl); +vuint8m4_t __riscv_vclz_v_u8m4_tu (vuint8m4_t vd, vuint8m4_t vs2, size_t vl); +vuint8m8_t __riscv_vclz_v_u8m8_tu (vuint8m8_t vd, vuint8m8_t vs2, size_t vl); +vuint16mf4_t __riscv_vclz_v_u16mf4_tu (vuint16mf4_t vd, vuint16mf4_t vs2, size_t vl); +vuint16mf2_t __riscv_vclz_v_u16mf2_tu (vuint16mf2_t vd, vuint16mf2_t vs2, size_t vl); +vuint16m1_t __riscv_vclz_v_u16m1_tu (vuint16m1_t vd, vuint16m1_t vs2, size_t vl); +vuint16m2_t __riscv_vclz_v_u16m2_tu (vuint16m2_t vd, vuint16m2_t vs2, size_t vl); +vuint16m4_t __riscv_vclz_v_u16m4_tu (vuint16m4_t vd, vuint16m4_t vs2, size_t vl); +vuint16m8_t __riscv_vclz_v_u16m8_tu (vuint16m8_t vd, vuint16m8_t vs2, size_t vl); +vuint32mf2_t __riscv_vclz_v_u32mf2_tu (vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vclz_v_u32m1_tu (vuint32m1_t vd, vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vclz_v_u32m2_tu (vuint32m2_t vd, vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vclz_v_u32m4_tu (vuint32m4_t vd, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vclz_v_u32m8_tu (vuint32m8_t vd, vuint32m8_t vs2, size_t vl); +vuint64m1_t __riscv_vclz_v_u64m1_tu (vuint64m1_t vd, vuint64m1_t vs2, size_t vl); +vuint64m2_t __riscv_vclz_v_u64m2_tu (vuint64m2_t vd, vuint64m2_t vs2, size_t vl); +vuint64m4_t __riscv_vclz_v_u64m4_tu (vuint64m4_t vd, vuint64m4_t vs2, size_t vl); +vuint64m8_t __riscv_vclz_v_u64m8_tu (vuint64m8_t vd, vuint64m8_t vs2, size_t vl); +vuint8mf8_t 
__riscv_vctz_v_u8mf8_tu (vuint8mf8_t vd, vuint8mf8_t vs2, size_t vl); +vuint8mf4_t __riscv_vctz_v_u8mf4_tu (vuint8mf4_t vd, vuint8mf4_t vs2, size_t vl); +vuint8mf2_t __riscv_vctz_v_u8mf2_tu (vuint8mf2_t vd, vuint8mf2_t vs2, size_t vl); +vuint8m1_t __riscv_vctz_v_u8m1_tu (vuint8m1_t vd, vuint8m1_t vs2, size_t vl); +vuint8m2_t __riscv_vctz_v_u8m2_tu (vuint8m2_t vd, vuint8m2_t vs2, size_t vl); +vuint8m4_t __riscv_vctz_v_u8m4_tu (vuint8m4_t vd, vuint8m4_t vs2, size_t vl); +vuint8m8_t __riscv_vctz_v_u8m8_tu (vuint8m8_t vd, vuint8m8_t vs2, size_t vl); +vuint16mf4_t __riscv_vctz_v_u16mf4_tu (vuint16mf4_t vd, vuint16mf4_t vs2, size_t vl); +vuint16mf2_t __riscv_vctz_v_u16mf2_tu (vuint16mf2_t vd, vuint16mf2_t vs2, size_t vl); +vuint16m1_t __riscv_vctz_v_u16m1_tu (vuint16m1_t vd, vuint16m1_t vs2, size_t vl); +vuint16m2_t __riscv_vctz_v_u16m2_tu (vuint16m2_t vd, vuint16m2_t vs2, size_t vl); +vuint16m4_t __riscv_vctz_v_u16m4_tu (vuint16m4_t vd, vuint16m4_t vs2, size_t vl); +vuint16m8_t __riscv_vctz_v_u16m8_tu (vuint16m8_t vd, vuint16m8_t vs2, size_t vl); +vuint32mf2_t __riscv_vctz_v_u32mf2_tu (vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vctz_v_u32m1_tu (vuint32m1_t vd, vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vctz_v_u32m2_tu (vuint32m2_t vd, vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vctz_v_u32m4_tu (vuint32m4_t vd, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vctz_v_u32m8_tu (vuint32m8_t vd, vuint32m8_t vs2, size_t vl); +vuint64m1_t __riscv_vctz_v_u64m1_tu (vuint64m1_t vd, vuint64m1_t vs2, size_t vl); +vuint64m2_t __riscv_vctz_v_u64m2_tu (vuint64m2_t vd, vuint64m2_t vs2, size_t vl); +vuint64m4_t __riscv_vctz_v_u64m4_tu (vuint64m4_t vd, vuint64m4_t vs2, size_t vl); +vuint64m8_t __riscv_vctz_v_u64m8_tu (vuint64m8_t vd, vuint64m8_t vs2, size_t vl); +// masked functions +vuint8mf8_t __riscv_vclz_v_u8mf8_tum (vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, size_t vl); +vuint8mf4_t __riscv_vclz_v_u8mf4_tum (vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, size_t vl); +vuint8mf2_t __riscv_vclz_v_u8mf2_tum (vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, size_t vl); +vuint8m1_t __riscv_vclz_v_u8m1_tum (vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, size_t vl); +vuint8m2_t __riscv_vclz_v_u8m2_tum (vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, size_t vl); +vuint8m4_t __riscv_vclz_v_u8m4_tum (vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, size_t vl); +vuint8m8_t __riscv_vclz_v_u8m8_tum (vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, size_t vl); +vuint16mf4_t __riscv_vclz_v_u16mf4_tum (vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, size_t vl); +vuint16mf2_t __riscv_vclz_v_u16mf2_tum (vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, size_t vl); +vuint16m1_t __riscv_vclz_v_u16m1_tum (vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, size_t vl); +vuint16m2_t __riscv_vclz_v_u16m2_tum (vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, size_t vl); +vuint16m4_t __riscv_vclz_v_u16m4_tum (vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, size_t vl); +vuint16m8_t __riscv_vclz_v_u16m8_tum (vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, size_t vl); +vuint32mf2_t __riscv_vclz_v_u32mf2_tum (vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vclz_v_u32m1_tum (vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vclz_v_u32m2_tum (vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vclz_v_u32m4_tum (vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vclz_v_u32m8_tum (vbool4_t vm, vuint32m8_t vd, 
vuint32m8_t vs2, size_t vl); +vuint64m1_t __riscv_vclz_v_u64m1_tum (vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, size_t vl); +vuint64m2_t __riscv_vclz_v_u64m2_tum (vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, size_t vl); +vuint64m4_t __riscv_vclz_v_u64m4_tum (vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, size_t vl); +vuint64m8_t __riscv_vclz_v_u64m8_tum (vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, size_t vl); +vuint8mf8_t __riscv_vctz_v_u8mf8_tum (vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, size_t vl); +vuint8mf4_t __riscv_vctz_v_u8mf4_tum (vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, size_t vl); +vuint8mf2_t __riscv_vctz_v_u8mf2_tum (vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, size_t vl); +vuint8m1_t __riscv_vctz_v_u8m1_tum (vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, size_t vl); +vuint8m2_t __riscv_vctz_v_u8m2_tum (vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, size_t vl); +vuint8m4_t __riscv_vctz_v_u8m4_tum (vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, size_t vl); +vuint8m8_t __riscv_vctz_v_u8m8_tum (vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, size_t vl); +vuint16mf4_t __riscv_vctz_v_u16mf4_tum (vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, size_t vl); +vuint16mf2_t __riscv_vctz_v_u16mf2_tum (vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, size_t vl); +vuint16m1_t __riscv_vctz_v_u16m1_tum (vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, size_t vl); +vuint16m2_t __riscv_vctz_v_u16m2_tum (vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, size_t vl); +vuint16m4_t __riscv_vctz_v_u16m4_tum (vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, size_t vl); +vuint16m8_t __riscv_vctz_v_u16m8_tum (vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, size_t vl); +vuint32mf2_t __riscv_vctz_v_u32mf2_tum (vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vctz_v_u32m1_tum (vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vctz_v_u32m2_tum (vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vctz_v_u32m4_tum (vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vctz_v_u32m8_tum (vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, size_t vl); +vuint64m1_t __riscv_vctz_v_u64m1_tum (vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, size_t vl); +vuint64m2_t __riscv_vctz_v_u64m2_tum (vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, size_t vl); +vuint64m4_t __riscv_vctz_v_u64m4_tum (vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, size_t vl); +vuint64m8_t __riscv_vctz_v_u64m8_tum (vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, size_t vl); +// masked functions +vuint8mf8_t __riscv_vclz_v_u8mf8_tumu (vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, size_t vl); +vuint8mf4_t __riscv_vclz_v_u8mf4_tumu (vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, size_t vl); +vuint8mf2_t __riscv_vclz_v_u8mf2_tumu (vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, size_t vl); +vuint8m1_t __riscv_vclz_v_u8m1_tumu (vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, size_t vl); +vuint8m2_t __riscv_vclz_v_u8m2_tumu (vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, size_t vl); +vuint8m4_t __riscv_vclz_v_u8m4_tumu (vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, size_t vl); +vuint8m8_t __riscv_vclz_v_u8m8_tumu (vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, size_t vl); +vuint16mf4_t __riscv_vclz_v_u16mf4_tumu (vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, size_t vl); +vuint16mf2_t __riscv_vclz_v_u16mf2_tumu (vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, size_t vl); +vuint16m1_t __riscv_vclz_v_u16m1_tumu (vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, 
size_t vl); +vuint16m2_t __riscv_vclz_v_u16m2_tumu (vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, size_t vl); +vuint16m4_t __riscv_vclz_v_u16m4_tumu (vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, size_t vl); +vuint16m8_t __riscv_vclz_v_u16m8_tumu (vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, size_t vl); +vuint32mf2_t __riscv_vclz_v_u32mf2_tumu (vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vclz_v_u32m1_tumu (vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vclz_v_u32m2_tumu (vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vclz_v_u32m4_tumu (vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vclz_v_u32m8_tumu (vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, size_t vl); +vuint64m1_t __riscv_vclz_v_u64m1_tumu (vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, size_t vl); +vuint64m2_t __riscv_vclz_v_u64m2_tumu (vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, size_t vl); +vuint64m4_t __riscv_vclz_v_u64m4_tumu (vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, size_t vl); +vuint64m8_t __riscv_vclz_v_u64m8_tumu (vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, size_t vl); +vuint8mf8_t __riscv_vctz_v_u8mf8_tumu (vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, size_t vl); +vuint8mf4_t __riscv_vctz_v_u8mf4_tumu (vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, size_t vl); +vuint8mf2_t __riscv_vctz_v_u8mf2_tumu (vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, size_t vl); +vuint8m1_t __riscv_vctz_v_u8m1_tumu (vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, size_t vl); +vuint8m2_t __riscv_vctz_v_u8m2_tumu (vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, size_t vl); +vuint8m4_t __riscv_vctz_v_u8m4_tumu (vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, size_t vl); +vuint8m8_t __riscv_vctz_v_u8m8_tumu (vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, size_t vl); +vuint16mf4_t __riscv_vctz_v_u16mf4_tumu (vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, size_t vl); +vuint16mf2_t __riscv_vctz_v_u16mf2_tumu (vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, size_t vl); +vuint16m1_t __riscv_vctz_v_u16m1_tumu (vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, size_t vl); +vuint16m2_t __riscv_vctz_v_u16m2_tumu (vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, size_t vl); +vuint16m4_t __riscv_vctz_v_u16m4_tumu (vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, size_t vl); +vuint16m8_t __riscv_vctz_v_u16m8_tumu (vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, size_t vl); +vuint32mf2_t __riscv_vctz_v_u32mf2_tumu (vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vctz_v_u32m1_tumu (vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vctz_v_u32m2_tumu (vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vctz_v_u32m4_tumu (vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vctz_v_u32m8_tumu (vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, size_t vl); +vuint64m1_t __riscv_vctz_v_u64m1_tumu (vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, size_t vl); +vuint64m2_t __riscv_vctz_v_u64m2_tumu (vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, size_t vl); +vuint64m4_t __riscv_vctz_v_u64m4_tumu (vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, size_t vl); +vuint64m8_t __riscv_vctz_v_u64m8_tumu (vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, size_t vl); +// masked functions +vuint8mf8_t __riscv_vclz_v_u8mf8_mu (vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, size_t vl); +vuint8mf4_t __riscv_vclz_v_u8mf4_mu (vbool32_t vm, vuint8mf4_t vd, 
vuint8mf4_t vs2, size_t vl); +vuint8mf2_t __riscv_vclz_v_u8mf2_mu (vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, size_t vl); +vuint8m1_t __riscv_vclz_v_u8m1_mu (vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, size_t vl); +vuint8m2_t __riscv_vclz_v_u8m2_mu (vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, size_t vl); +vuint8m4_t __riscv_vclz_v_u8m4_mu (vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, size_t vl); +vuint8m8_t __riscv_vclz_v_u8m8_mu (vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, size_t vl); +vuint16mf4_t __riscv_vclz_v_u16mf4_mu (vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, size_t vl); +vuint16mf2_t __riscv_vclz_v_u16mf2_mu (vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, size_t vl); +vuint16m1_t __riscv_vclz_v_u16m1_mu (vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, size_t vl); +vuint16m2_t __riscv_vclz_v_u16m2_mu (vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, size_t vl); +vuint16m4_t __riscv_vclz_v_u16m4_mu (vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, size_t vl); +vuint16m8_t __riscv_vclz_v_u16m8_mu (vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, size_t vl); +vuint32mf2_t __riscv_vclz_v_u32mf2_mu (vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vclz_v_u32m1_mu (vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vclz_v_u32m2_mu (vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vclz_v_u32m4_mu (vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vclz_v_u32m8_mu (vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, size_t vl); +vuint64m1_t __riscv_vclz_v_u64m1_mu (vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, size_t vl); +vuint64m2_t __riscv_vclz_v_u64m2_mu (vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, size_t vl); +vuint64m4_t __riscv_vclz_v_u64m4_mu (vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, size_t vl); +vuint64m8_t __riscv_vclz_v_u64m8_mu (vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, size_t vl); +vuint8mf8_t __riscv_vctz_v_u8mf8_mu (vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, size_t vl); +vuint8mf4_t __riscv_vctz_v_u8mf4_mu (vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, size_t vl); +vuint8mf2_t __riscv_vctz_v_u8mf2_mu (vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, size_t vl); +vuint8m1_t __riscv_vctz_v_u8m1_mu (vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, size_t vl); +vuint8m2_t __riscv_vctz_v_u8m2_mu (vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, size_t vl); +vuint8m4_t __riscv_vctz_v_u8m4_mu (vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, size_t vl); +vuint8m8_t __riscv_vctz_v_u8m8_mu (vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, size_t vl); +vuint16mf4_t __riscv_vctz_v_u16mf4_mu (vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, size_t vl); +vuint16mf2_t __riscv_vctz_v_u16mf2_mu (vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, size_t vl); +vuint16m1_t __riscv_vctz_v_u16m1_mu (vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, size_t vl); +vuint16m2_t __riscv_vctz_v_u16m2_mu (vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, size_t vl); +vuint16m4_t __riscv_vctz_v_u16m4_mu (vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, size_t vl); +vuint16m8_t __riscv_vctz_v_u16m8_mu (vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, size_t vl); +vuint32mf2_t __riscv_vctz_v_u32mf2_mu (vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vctz_v_u32m1_mu (vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vctz_v_u32m2_mu (vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vctz_v_u32m4_mu (vbool8_t vm, 
vuint32m4_t vd, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vctz_v_u32m8_mu (vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, size_t vl); +vuint64m1_t __riscv_vctz_v_u64m1_mu (vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, size_t vl); +vuint64m2_t __riscv_vctz_v_u64m2_mu (vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, size_t vl); +vuint64m4_t __riscv_vctz_v_u64m4_mu (vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, size_t vl); +vuint64m8_t __riscv_vctz_v_u64m8_mu (vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, size_t vl); +---- + +[[policy-variant-]] +==== Vector Basic Bit-manipulation - Vector Population Count + +[,c] +---- +vuint8mf8_t __riscv_vcpop_v_u8mf8_tu (vuint8mf8_t vd, vuint8mf8_t vs2, size_t vl); +vuint8mf4_t __riscv_vcpop_v_u8mf4_tu (vuint8mf4_t vd, vuint8mf4_t vs2, size_t vl); +vuint8mf2_t __riscv_vcpop_v_u8mf2_tu (vuint8mf2_t vd, vuint8mf2_t vs2, size_t vl); +vuint8m1_t __riscv_vcpop_v_u8m1_tu (vuint8m1_t vd, vuint8m1_t vs2, size_t vl); +vuint8m2_t __riscv_vcpop_v_u8m2_tu (vuint8m2_t vd, vuint8m2_t vs2, size_t vl); +vuint8m4_t __riscv_vcpop_v_u8m4_tu (vuint8m4_t vd, vuint8m4_t vs2, size_t vl); +vuint8m8_t __riscv_vcpop_v_u8m8_tu (vuint8m8_t vd, vuint8m8_t vs2, size_t vl); +vuint16mf4_t __riscv_vcpop_v_u16mf4_tu (vuint16mf4_t vd, vuint16mf4_t vs2, size_t vl); +vuint16mf2_t __riscv_vcpop_v_u16mf2_tu (vuint16mf2_t vd, vuint16mf2_t vs2, size_t vl); +vuint16m1_t __riscv_vcpop_v_u16m1_tu (vuint16m1_t vd, vuint16m1_t vs2, size_t vl); +vuint16m2_t __riscv_vcpop_v_u16m2_tu (vuint16m2_t vd, vuint16m2_t vs2, size_t vl); +vuint16m4_t __riscv_vcpop_v_u16m4_tu (vuint16m4_t vd, vuint16m4_t vs2, size_t vl); +vuint16m8_t __riscv_vcpop_v_u16m8_tu (vuint16m8_t vd, vuint16m8_t vs2, size_t vl); +vuint32mf2_t __riscv_vcpop_v_u32mf2_tu (vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vcpop_v_u32m1_tu (vuint32m1_t vd, vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vcpop_v_u32m2_tu (vuint32m2_t vd, vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vcpop_v_u32m4_tu (vuint32m4_t vd, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vcpop_v_u32m8_tu (vuint32m8_t vd, vuint32m8_t vs2, size_t vl); +vuint64m1_t __riscv_vcpop_v_u64m1_tu (vuint64m1_t vd, vuint64m1_t vs2, size_t vl); +vuint64m2_t __riscv_vcpop_v_u64m2_tu (vuint64m2_t vd, vuint64m2_t vs2, size_t vl); +vuint64m4_t __riscv_vcpop_v_u64m4_tu (vuint64m4_t vd, vuint64m4_t vs2, size_t vl); +vuint64m8_t __riscv_vcpop_v_u64m8_tu (vuint64m8_t vd, vuint64m8_t vs2, size_t vl); +// masked functions +vuint8mf8_t __riscv_vcpop_v_u8mf8_tum (vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, size_t vl); +vuint8mf4_t __riscv_vcpop_v_u8mf4_tum (vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, size_t vl); +vuint8mf2_t __riscv_vcpop_v_u8mf2_tum (vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, size_t vl); +vuint8m1_t __riscv_vcpop_v_u8m1_tum (vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, size_t vl); +vuint8m2_t __riscv_vcpop_v_u8m2_tum (vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, size_t vl); +vuint8m4_t __riscv_vcpop_v_u8m4_tum (vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, size_t vl); +vuint8m8_t __riscv_vcpop_v_u8m8_tum (vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, size_t vl); +vuint16mf4_t __riscv_vcpop_v_u16mf4_tum (vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, size_t vl); +vuint16mf2_t __riscv_vcpop_v_u16mf2_tum (vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, size_t vl); +vuint16m1_t __riscv_vcpop_v_u16m1_tum (vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, size_t vl); +vuint16m2_t __riscv_vcpop_v_u16m2_tum (vbool8_t vm, vuint16m2_t vd, 
vuint16m2_t vs2, size_t vl); +vuint16m4_t __riscv_vcpop_v_u16m4_tum (vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, size_t vl); +vuint16m8_t __riscv_vcpop_v_u16m8_tum (vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, size_t vl); +vuint32mf2_t __riscv_vcpop_v_u32mf2_tum (vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vcpop_v_u32m1_tum (vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vcpop_v_u32m2_tum (vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vcpop_v_u32m4_tum (vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vcpop_v_u32m8_tum (vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, size_t vl); +vuint64m1_t __riscv_vcpop_v_u64m1_tum (vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, size_t vl); +vuint64m2_t __riscv_vcpop_v_u64m2_tum (vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, size_t vl); +vuint64m4_t __riscv_vcpop_v_u64m4_tum (vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, size_t vl); +vuint64m8_t __riscv_vcpop_v_u64m8_tum (vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, size_t vl); +// masked functions +vuint8mf8_t __riscv_vcpop_v_u8mf8_tumu (vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, size_t vl); +vuint8mf4_t __riscv_vcpop_v_u8mf4_tumu (vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, size_t vl); +vuint8mf2_t __riscv_vcpop_v_u8mf2_tumu (vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, size_t vl); +vuint8m1_t __riscv_vcpop_v_u8m1_tumu (vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, size_t vl); +vuint8m2_t __riscv_vcpop_v_u8m2_tumu (vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, size_t vl); +vuint8m4_t __riscv_vcpop_v_u8m4_tumu (vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, size_t vl); +vuint8m8_t __riscv_vcpop_v_u8m8_tumu (vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, size_t vl); +vuint16mf4_t __riscv_vcpop_v_u16mf4_tumu (vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, size_t vl); +vuint16mf2_t __riscv_vcpop_v_u16mf2_tumu (vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, size_t vl); +vuint16m1_t __riscv_vcpop_v_u16m1_tumu (vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, size_t vl); +vuint16m2_t __riscv_vcpop_v_u16m2_tumu (vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, size_t vl); +vuint16m4_t __riscv_vcpop_v_u16m4_tumu (vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, size_t vl); +vuint16m8_t __riscv_vcpop_v_u16m8_tumu (vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, size_t vl); +vuint32mf2_t __riscv_vcpop_v_u32mf2_tumu (vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vcpop_v_u32m1_tumu (vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vcpop_v_u32m2_tumu (vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vcpop_v_u32m4_tumu (vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vcpop_v_u32m8_tumu (vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, size_t vl); +vuint64m1_t __riscv_vcpop_v_u64m1_tumu (vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, size_t vl); +vuint64m2_t __riscv_vcpop_v_u64m2_tumu (vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, size_t vl); +vuint64m4_t __riscv_vcpop_v_u64m4_tumu (vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, size_t vl); +vuint64m8_t __riscv_vcpop_v_u64m8_tumu (vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, size_t vl); +// masked functions +vuint8mf8_t __riscv_vcpop_v_u8mf8_mu (vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, size_t vl); +vuint8mf4_t __riscv_vcpop_v_u8mf4_mu (vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, size_t vl); +vuint8mf2_t 
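+// ---------------------------------------------------------------------------
+// Illustrative usage sketch (not part of the generated listing; the wrapper
+// name is hypothetical, and <riscv_vector.h> plus Zvbb are assumed). The
+// _tumu policy keeps both tail and inactive elements of vd undisturbed while
+// the active elements receive the per-element population count of vs2.
+vuint32m1_t example_vcpop_u32m1_tumu(vbool32_t vm, vuint32m1_t vd,
+                                     vuint32m1_t vs2, size_t vl) {
+  return __riscv_vcpop_v_u32m1_tumu(vm, vd, vs2, vl);
+}
+// ---------------------------------------------------------------------------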
__riscv_vcpop_v_u8mf2_mu (vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, size_t vl); +vuint8m1_t __riscv_vcpop_v_u8m1_mu (vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, size_t vl); +vuint8m2_t __riscv_vcpop_v_u8m2_mu (vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, size_t vl); +vuint8m4_t __riscv_vcpop_v_u8m4_mu (vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, size_t vl); +vuint8m8_t __riscv_vcpop_v_u8m8_mu (vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, size_t vl); +vuint16mf4_t __riscv_vcpop_v_u16mf4_mu (vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, size_t vl); +vuint16mf2_t __riscv_vcpop_v_u16mf2_mu (vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, size_t vl); +vuint16m1_t __riscv_vcpop_v_u16m1_mu (vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, size_t vl); +vuint16m2_t __riscv_vcpop_v_u16m2_mu (vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, size_t vl); +vuint16m4_t __riscv_vcpop_v_u16m4_mu (vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, size_t vl); +vuint16m8_t __riscv_vcpop_v_u16m8_mu (vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, size_t vl); +vuint32mf2_t __riscv_vcpop_v_u32mf2_mu (vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vcpop_v_u32m1_mu (vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vcpop_v_u32m2_mu (vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vcpop_v_u32m4_mu (vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vcpop_v_u32m8_mu (vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, size_t vl); +vuint64m1_t __riscv_vcpop_v_u64m1_mu (vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, size_t vl); +vuint64m2_t __riscv_vcpop_v_u64m2_mu (vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, size_t vl); +vuint64m4_t __riscv_vcpop_v_u64m4_mu (vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, size_t vl); +vuint64m8_t __riscv_vcpop_v_u64m8_mu (vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, size_t vl); +---- + +[[policy-variant-]] +==== Vector Bit-manipulation used in Cryptography - Rotate + +[,c] +---- +vuint8mf8_t __riscv_vrol_vv_u8mf8_tu (vuint8mf8_t vd, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl); +vuint8mf8_t __riscv_vrol_vx_u8mf8_tu (vuint8mf8_t vd, vuint8mf8_t vs2, size_t rs1, size_t vl); +vuint8mf4_t __riscv_vrol_vv_u8mf4_tu (vuint8mf4_t vd, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl); +vuint8mf4_t __riscv_vrol_vx_u8mf4_tu (vuint8mf4_t vd, vuint8mf4_t vs2, size_t rs1, size_t vl); +vuint8mf2_t __riscv_vrol_vv_u8mf2_tu (vuint8mf2_t vd, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl); +vuint8mf2_t __riscv_vrol_vx_u8mf2_tu (vuint8mf2_t vd, vuint8mf2_t vs2, size_t rs1, size_t vl); +vuint8m1_t __riscv_vrol_vv_u8m1_tu (vuint8m1_t vd, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl); +vuint8m1_t __riscv_vrol_vx_u8m1_tu (vuint8m1_t vd, vuint8m1_t vs2, size_t rs1, size_t vl); +vuint8m2_t __riscv_vrol_vv_u8m2_tu (vuint8m2_t vd, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl); +vuint8m2_t __riscv_vrol_vx_u8m2_tu (vuint8m2_t vd, vuint8m2_t vs2, size_t rs1, size_t vl); +vuint8m4_t __riscv_vrol_vv_u8m4_tu (vuint8m4_t vd, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl); +vuint8m4_t __riscv_vrol_vx_u8m4_tu (vuint8m4_t vd, vuint8m4_t vs2, size_t rs1, size_t vl); +vuint8m8_t __riscv_vrol_vv_u8m8_tu (vuint8m8_t vd, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl); +vuint8m8_t __riscv_vrol_vx_u8m8_tu (vuint8m8_t vd, vuint8m8_t vs2, size_t rs1, size_t vl); +vuint16mf4_t __riscv_vrol_vv_u16mf4_tu (vuint16mf4_t vd, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl); +vuint16mf4_t __riscv_vrol_vx_u16mf4_tu (vuint16mf4_t vd, 
vuint16mf4_t vs2, size_t rs1, size_t vl); +vuint16mf2_t __riscv_vrol_vv_u16mf2_tu (vuint16mf2_t vd, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl); +vuint16mf2_t __riscv_vrol_vx_u16mf2_tu (vuint16mf2_t vd, vuint16mf2_t vs2, size_t rs1, size_t vl); +vuint16m1_t __riscv_vrol_vv_u16m1_tu (vuint16m1_t vd, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl); +vuint16m1_t __riscv_vrol_vx_u16m1_tu (vuint16m1_t vd, vuint16m1_t vs2, size_t rs1, size_t vl); +vuint16m2_t __riscv_vrol_vv_u16m2_tu (vuint16m2_t vd, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl); +vuint16m2_t __riscv_vrol_vx_u16m2_tu (vuint16m2_t vd, vuint16m2_t vs2, size_t rs1, size_t vl); +vuint16m4_t __riscv_vrol_vv_u16m4_tu (vuint16m4_t vd, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl); +vuint16m4_t __riscv_vrol_vx_u16m4_tu (vuint16m4_t vd, vuint16m4_t vs2, size_t rs1, size_t vl); +vuint16m8_t __riscv_vrol_vv_u16m8_tu (vuint16m8_t vd, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl); +vuint16m8_t __riscv_vrol_vx_u16m8_tu (vuint16m8_t vd, vuint16m8_t vs2, size_t rs1, size_t vl); +vuint32mf2_t __riscv_vrol_vv_u32mf2_tu (vuint32mf2_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl); +vuint32mf2_t __riscv_vrol_vx_u32mf2_tu (vuint32mf2_t vd, vuint32mf2_t vs2, size_t rs1, size_t vl); +vuint32m1_t __riscv_vrol_vv_u32m1_tu (vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl); +vuint32m1_t __riscv_vrol_vx_u32m1_tu (vuint32m1_t vd, vuint32m1_t vs2, size_t rs1, size_t vl); +vuint32m2_t __riscv_vrol_vv_u32m2_tu (vuint32m2_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl); +vuint32m2_t __riscv_vrol_vx_u32m2_tu (vuint32m2_t vd, vuint32m2_t vs2, size_t rs1, size_t vl); +vuint32m4_t __riscv_vrol_vv_u32m4_tu (vuint32m4_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl); +vuint32m4_t __riscv_vrol_vx_u32m4_tu (vuint32m4_t vd, vuint32m4_t vs2, size_t rs1, size_t vl); +vuint32m8_t __riscv_vrol_vv_u32m8_tu (vuint32m8_t vd, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl); +vuint32m8_t __riscv_vrol_vx_u32m8_tu (vuint32m8_t vd, vuint32m8_t vs2, size_t rs1, size_t vl); +vuint64m1_t __riscv_vrol_vv_u64m1_tu (vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl); +vuint64m1_t __riscv_vrol_vx_u64m1_tu (vuint64m1_t vd, vuint64m1_t vs2, size_t rs1, size_t vl); +vuint64m2_t __riscv_vrol_vv_u64m2_tu (vuint64m2_t vd, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl); +vuint64m2_t __riscv_vrol_vx_u64m2_tu (vuint64m2_t vd, vuint64m2_t vs2, size_t rs1, size_t vl); +vuint64m4_t __riscv_vrol_vv_u64m4_tu (vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl); +vuint64m4_t __riscv_vrol_vx_u64m4_tu (vuint64m4_t vd, vuint64m4_t vs2, size_t rs1, size_t vl); +vuint64m8_t __riscv_vrol_vv_u64m8_tu (vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl); +vuint64m8_t __riscv_vrol_vx_u64m8_tu (vuint64m8_t vd, vuint64m8_t vs2, size_t rs1, size_t vl); +vuint8mf8_t __riscv_vror_vv_u8mf8_tu (vuint8mf8_t vd, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl); +vuint8mf8_t __riscv_vror_vx_u8mf8_tu (vuint8mf8_t vd, vuint8mf8_t vs2, size_t rs1, size_t vl); +vuint8mf4_t __riscv_vror_vv_u8mf4_tu (vuint8mf4_t vd, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl); +vuint8mf4_t __riscv_vror_vx_u8mf4_tu (vuint8mf4_t vd, vuint8mf4_t vs2, size_t rs1, size_t vl); +vuint8mf2_t __riscv_vror_vv_u8mf2_tu (vuint8mf2_t vd, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl); +vuint8mf2_t __riscv_vror_vx_u8mf2_tu (vuint8mf2_t vd, vuint8mf2_t vs2, size_t rs1, size_t vl); +vuint8m1_t __riscv_vror_vv_u8m1_tu (vuint8m1_t vd, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl); +vuint8m1_t __riscv_vror_vx_u8m1_tu (vuint8m1_t 
vd, vuint8m1_t vs2, size_t rs1, size_t vl); +vuint8m2_t __riscv_vror_vv_u8m2_tu (vuint8m2_t vd, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl); +vuint8m2_t __riscv_vror_vx_u8m2_tu (vuint8m2_t vd, vuint8m2_t vs2, size_t rs1, size_t vl); +vuint8m4_t __riscv_vror_vv_u8m4_tu (vuint8m4_t vd, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl); +vuint8m4_t __riscv_vror_vx_u8m4_tu (vuint8m4_t vd, vuint8m4_t vs2, size_t rs1, size_t vl); +vuint8m8_t __riscv_vror_vv_u8m8_tu (vuint8m8_t vd, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl); +vuint8m8_t __riscv_vror_vx_u8m8_tu (vuint8m8_t vd, vuint8m8_t vs2, size_t rs1, size_t vl); +vuint16mf4_t __riscv_vror_vv_u16mf4_tu (vuint16mf4_t vd, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl); +vuint16mf4_t __riscv_vror_vx_u16mf4_tu (vuint16mf4_t vd, vuint16mf4_t vs2, size_t rs1, size_t vl); +vuint16mf2_t __riscv_vror_vv_u16mf2_tu (vuint16mf2_t vd, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl); +vuint16mf2_t __riscv_vror_vx_u16mf2_tu (vuint16mf2_t vd, vuint16mf2_t vs2, size_t rs1, size_t vl); +vuint16m1_t __riscv_vror_vv_u16m1_tu (vuint16m1_t vd, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl); +vuint16m1_t __riscv_vror_vx_u16m1_tu (vuint16m1_t vd, vuint16m1_t vs2, size_t rs1, size_t vl); +vuint16m2_t __riscv_vror_vv_u16m2_tu (vuint16m2_t vd, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl); +vuint16m2_t __riscv_vror_vx_u16m2_tu (vuint16m2_t vd, vuint16m2_t vs2, size_t rs1, size_t vl); +vuint16m4_t __riscv_vror_vv_u16m4_tu (vuint16m4_t vd, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl); +vuint16m4_t __riscv_vror_vx_u16m4_tu (vuint16m4_t vd, vuint16m4_t vs2, size_t rs1, size_t vl); +vuint16m8_t __riscv_vror_vv_u16m8_tu (vuint16m8_t vd, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl); +vuint16m8_t __riscv_vror_vx_u16m8_tu (vuint16m8_t vd, vuint16m8_t vs2, size_t rs1, size_t vl); +vuint32mf2_t __riscv_vror_vv_u32mf2_tu (vuint32mf2_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl); +vuint32mf2_t __riscv_vror_vx_u32mf2_tu (vuint32mf2_t vd, vuint32mf2_t vs2, size_t rs1, size_t vl); +vuint32m1_t __riscv_vror_vv_u32m1_tu (vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl); +vuint32m1_t __riscv_vror_vx_u32m1_tu (vuint32m1_t vd, vuint32m1_t vs2, size_t rs1, size_t vl); +vuint32m2_t __riscv_vror_vv_u32m2_tu (vuint32m2_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl); +vuint32m2_t __riscv_vror_vx_u32m2_tu (vuint32m2_t vd, vuint32m2_t vs2, size_t rs1, size_t vl); +vuint32m4_t __riscv_vror_vv_u32m4_tu (vuint32m4_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl); +vuint32m4_t __riscv_vror_vx_u32m4_tu (vuint32m4_t vd, vuint32m4_t vs2, size_t rs1, size_t vl); +vuint32m8_t __riscv_vror_vv_u32m8_tu (vuint32m8_t vd, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl); +vuint32m8_t __riscv_vror_vx_u32m8_tu (vuint32m8_t vd, vuint32m8_t vs2, size_t rs1, size_t vl); +vuint64m1_t __riscv_vror_vv_u64m1_tu (vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl); +vuint64m1_t __riscv_vror_vx_u64m1_tu (vuint64m1_t vd, vuint64m1_t vs2, size_t rs1, size_t vl); +vuint64m2_t __riscv_vror_vv_u64m2_tu (vuint64m2_t vd, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl); +vuint64m2_t __riscv_vror_vx_u64m2_tu (vuint64m2_t vd, vuint64m2_t vs2, size_t rs1, size_t vl); +vuint64m4_t __riscv_vror_vv_u64m4_tu (vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl); +vuint64m4_t __riscv_vror_vx_u64m4_tu (vuint64m4_t vd, vuint64m4_t vs2, size_t rs1, size_t vl); +vuint64m8_t __riscv_vror_vv_u64m8_tu (vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl); +vuint64m8_t __riscv_vror_vx_u64m8_tu (vuint64m8_t vd, 
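+// ---------------------------------------------------------------------------
+// Illustrative usage sketch (not part of the generated listing; the wrapper
+// name is hypothetical, and <riscv_vector.h> plus Zvbb are assumed). The .vx
+// form rotates every element of vs2 right by the scalar amount rs1; with the
+// _tu policy, tail elements are taken from vd rather than being agnostic.
+vuint32m1_t example_vror_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2,
+                                  size_t rs1, size_t vl) {
+  return __riscv_vror_vx_u32m1_tu(vd, vs2, rs1, vl);
+}
+// ---------------------------------------------------------------------------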
vuint64m8_t vs2, size_t rs1, size_t vl); +// masked functions +vuint8mf8_t __riscv_vrol_vv_u8mf8_tum (vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl); +vuint8mf8_t __riscv_vrol_vx_u8mf8_tum (vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, size_t rs1, size_t vl); +vuint8mf4_t __riscv_vrol_vv_u8mf4_tum (vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl); +vuint8mf4_t __riscv_vrol_vx_u8mf4_tum (vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, size_t rs1, size_t vl); +vuint8mf2_t __riscv_vrol_vv_u8mf2_tum (vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl); +vuint8mf2_t __riscv_vrol_vx_u8mf2_tum (vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, size_t rs1, size_t vl); +vuint8m1_t __riscv_vrol_vv_u8m1_tum (vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl); +vuint8m1_t __riscv_vrol_vx_u8m1_tum (vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, size_t rs1, size_t vl); +vuint8m2_t __riscv_vrol_vv_u8m2_tum (vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl); +vuint8m2_t __riscv_vrol_vx_u8m2_tum (vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, size_t rs1, size_t vl); +vuint8m4_t __riscv_vrol_vv_u8m4_tum (vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl); +vuint8m4_t __riscv_vrol_vx_u8m4_tum (vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, size_t rs1, size_t vl); +vuint8m8_t __riscv_vrol_vv_u8m8_tum (vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl); +vuint8m8_t __riscv_vrol_vx_u8m8_tum (vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, size_t rs1, size_t vl); +vuint16mf4_t __riscv_vrol_vv_u16mf4_tum (vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl); +vuint16mf4_t __riscv_vrol_vx_u16mf4_tum (vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, size_t rs1, size_t vl); +vuint16mf2_t __riscv_vrol_vv_u16mf2_tum (vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl); +vuint16mf2_t __riscv_vrol_vx_u16mf2_tum (vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, size_t rs1, size_t vl); +vuint16m1_t __riscv_vrol_vv_u16m1_tum (vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl); +vuint16m1_t __riscv_vrol_vx_u16m1_tum (vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, size_t rs1, size_t vl); +vuint16m2_t __riscv_vrol_vv_u16m2_tum (vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl); +vuint16m2_t __riscv_vrol_vx_u16m2_tum (vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, size_t rs1, size_t vl); +vuint16m4_t __riscv_vrol_vv_u16m4_tum (vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl); +vuint16m4_t __riscv_vrol_vx_u16m4_tum (vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, size_t rs1, size_t vl); +vuint16m8_t __riscv_vrol_vv_u16m8_tum (vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl); +vuint16m8_t __riscv_vrol_vx_u16m8_tum (vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, size_t rs1, size_t vl); +vuint32mf2_t __riscv_vrol_vv_u32mf2_tum (vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl); +vuint32mf2_t __riscv_vrol_vx_u32mf2_tum (vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, size_t rs1, size_t vl); +vuint32m1_t __riscv_vrol_vv_u32m1_tum (vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl); +vuint32m1_t __riscv_vrol_vx_u32m1_tum (vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, size_t rs1, size_t vl); +vuint32m2_t __riscv_vrol_vv_u32m2_tum (vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t 
vl); +vuint32m2_t __riscv_vrol_vx_u32m2_tum (vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, size_t rs1, size_t vl); +vuint32m4_t __riscv_vrol_vv_u32m4_tum (vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl); +vuint32m4_t __riscv_vrol_vx_u32m4_tum (vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, size_t rs1, size_t vl); +vuint32m8_t __riscv_vrol_vv_u32m8_tum (vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl); +vuint32m8_t __riscv_vrol_vx_u32m8_tum (vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, size_t rs1, size_t vl); +vuint64m1_t __riscv_vrol_vv_u64m1_tum (vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl); +vuint64m1_t __riscv_vrol_vx_u64m1_tum (vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, size_t rs1, size_t vl); +vuint64m2_t __riscv_vrol_vv_u64m2_tum (vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl); +vuint64m2_t __riscv_vrol_vx_u64m2_tum (vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, size_t rs1, size_t vl); +vuint64m4_t __riscv_vrol_vv_u64m4_tum (vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl); +vuint64m4_t __riscv_vrol_vx_u64m4_tum (vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, size_t rs1, size_t vl); +vuint64m8_t __riscv_vrol_vv_u64m8_tum (vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl); +vuint64m8_t __riscv_vrol_vx_u64m8_tum (vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, size_t rs1, size_t vl); +vuint8mf8_t __riscv_vror_vv_u8mf8_tum (vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl); +vuint8mf8_t __riscv_vror_vx_u8mf8_tum (vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, size_t rs1, size_t vl); +vuint8mf4_t __riscv_vror_vv_u8mf4_tum (vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl); +vuint8mf4_t __riscv_vror_vx_u8mf4_tum (vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, size_t rs1, size_t vl); +vuint8mf2_t __riscv_vror_vv_u8mf2_tum (vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl); +vuint8mf2_t __riscv_vror_vx_u8mf2_tum (vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, size_t rs1, size_t vl); +vuint8m1_t __riscv_vror_vv_u8m1_tum (vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl); +vuint8m1_t __riscv_vror_vx_u8m1_tum (vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, size_t rs1, size_t vl); +vuint8m2_t __riscv_vror_vv_u8m2_tum (vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl); +vuint8m2_t __riscv_vror_vx_u8m2_tum (vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, size_t rs1, size_t vl); +vuint8m4_t __riscv_vror_vv_u8m4_tum (vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl); +vuint8m4_t __riscv_vror_vx_u8m4_tum (vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, size_t rs1, size_t vl); +vuint8m8_t __riscv_vror_vv_u8m8_tum (vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl); +vuint8m8_t __riscv_vror_vx_u8m8_tum (vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, size_t rs1, size_t vl); +vuint16mf4_t __riscv_vror_vv_u16mf4_tum (vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl); +vuint16mf4_t __riscv_vror_vx_u16mf4_tum (vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, size_t rs1, size_t vl); +vuint16mf2_t __riscv_vror_vv_u16mf2_tum (vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl); +vuint16mf2_t __riscv_vror_vx_u16mf2_tum (vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, size_t rs1, size_t vl); +vuint16m1_t __riscv_vror_vv_u16m1_tum (vbool16_t vm, vuint16m1_t 
vd, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl); +vuint16m1_t __riscv_vror_vx_u16m1_tum (vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, size_t rs1, size_t vl); +vuint16m2_t __riscv_vror_vv_u16m2_tum (vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl); +vuint16m2_t __riscv_vror_vx_u16m2_tum (vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, size_t rs1, size_t vl); +vuint16m4_t __riscv_vror_vv_u16m4_tum (vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl); +vuint16m4_t __riscv_vror_vx_u16m4_tum (vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, size_t rs1, size_t vl); +vuint16m8_t __riscv_vror_vv_u16m8_tum (vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl); +vuint16m8_t __riscv_vror_vx_u16m8_tum (vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, size_t rs1, size_t vl); +vuint32mf2_t __riscv_vror_vv_u32mf2_tum (vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl); +vuint32mf2_t __riscv_vror_vx_u32mf2_tum (vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, size_t rs1, size_t vl); +vuint32m1_t __riscv_vror_vv_u32m1_tum (vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl); +vuint32m1_t __riscv_vror_vx_u32m1_tum (vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, size_t rs1, size_t vl); +vuint32m2_t __riscv_vror_vv_u32m2_tum (vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl); +vuint32m2_t __riscv_vror_vx_u32m2_tum (vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, size_t rs1, size_t vl); +vuint32m4_t __riscv_vror_vv_u32m4_tum (vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl); +vuint32m4_t __riscv_vror_vx_u32m4_tum (vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, size_t rs1, size_t vl); +vuint32m8_t __riscv_vror_vv_u32m8_tum (vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl); +vuint32m8_t __riscv_vror_vx_u32m8_tum (vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, size_t rs1, size_t vl); +vuint64m1_t __riscv_vror_vv_u64m1_tum (vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl); +vuint64m1_t __riscv_vror_vx_u64m1_tum (vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, size_t rs1, size_t vl); +vuint64m2_t __riscv_vror_vv_u64m2_tum (vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl); +vuint64m2_t __riscv_vror_vx_u64m2_tum (vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, size_t rs1, size_t vl); +vuint64m4_t __riscv_vror_vv_u64m4_tum (vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl); +vuint64m4_t __riscv_vror_vx_u64m4_tum (vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, size_t rs1, size_t vl); +vuint64m8_t __riscv_vror_vv_u64m8_tum (vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl); +vuint64m8_t __riscv_vror_vx_u64m8_tum (vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, size_t rs1, size_t vl); +// masked functions +vuint8mf8_t __riscv_vrol_vv_u8mf8_tumu (vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl); +vuint8mf8_t __riscv_vrol_vx_u8mf8_tumu (vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, size_t rs1, size_t vl); +vuint8mf4_t __riscv_vrol_vv_u8mf4_tumu (vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl); +vuint8mf4_t __riscv_vrol_vx_u8mf4_tumu (vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, size_t rs1, size_t vl); +vuint8mf2_t __riscv_vrol_vv_u8mf2_tumu (vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl); +vuint8mf2_t __riscv_vrol_vx_u8mf2_tumu (vbool16_t vm, vuint8mf2_t vd, 
vuint8mf2_t vs2, size_t rs1, size_t vl); +vuint8m1_t __riscv_vrol_vv_u8m1_tumu (vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl); +vuint8m1_t __riscv_vrol_vx_u8m1_tumu (vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, size_t rs1, size_t vl); +vuint8m2_t __riscv_vrol_vv_u8m2_tumu (vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl); +vuint8m2_t __riscv_vrol_vx_u8m2_tumu (vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, size_t rs1, size_t vl); +vuint8m4_t __riscv_vrol_vv_u8m4_tumu (vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl); +vuint8m4_t __riscv_vrol_vx_u8m4_tumu (vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, size_t rs1, size_t vl); +vuint8m8_t __riscv_vrol_vv_u8m8_tumu (vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl); +vuint8m8_t __riscv_vrol_vx_u8m8_tumu (vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, size_t rs1, size_t vl); +vuint16mf4_t __riscv_vrol_vv_u16mf4_tumu (vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl); +vuint16mf4_t __riscv_vrol_vx_u16mf4_tumu (vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, size_t rs1, size_t vl); +vuint16mf2_t __riscv_vrol_vv_u16mf2_tumu (vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl); +vuint16mf2_t __riscv_vrol_vx_u16mf2_tumu (vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, size_t rs1, size_t vl); +vuint16m1_t __riscv_vrol_vv_u16m1_tumu (vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl); +vuint16m1_t __riscv_vrol_vx_u16m1_tumu (vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, size_t rs1, size_t vl); +vuint16m2_t __riscv_vrol_vv_u16m2_tumu (vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl); +vuint16m2_t __riscv_vrol_vx_u16m2_tumu (vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, size_t rs1, size_t vl); +vuint16m4_t __riscv_vrol_vv_u16m4_tumu (vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl); +vuint16m4_t __riscv_vrol_vx_u16m4_tumu (vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, size_t rs1, size_t vl); +vuint16m8_t __riscv_vrol_vv_u16m8_tumu (vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl); +vuint16m8_t __riscv_vrol_vx_u16m8_tumu (vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, size_t rs1, size_t vl); +vuint32mf2_t __riscv_vrol_vv_u32mf2_tumu (vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl); +vuint32mf2_t __riscv_vrol_vx_u32mf2_tumu (vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, size_t rs1, size_t vl); +vuint32m1_t __riscv_vrol_vv_u32m1_tumu (vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl); +vuint32m1_t __riscv_vrol_vx_u32m1_tumu (vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, size_t rs1, size_t vl); +vuint32m2_t __riscv_vrol_vv_u32m2_tumu (vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl); +vuint32m2_t __riscv_vrol_vx_u32m2_tumu (vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, size_t rs1, size_t vl); +vuint32m4_t __riscv_vrol_vv_u32m4_tumu (vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl); +vuint32m4_t __riscv_vrol_vx_u32m4_tumu (vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, size_t rs1, size_t vl); +vuint32m8_t __riscv_vrol_vv_u32m8_tumu (vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl); +vuint32m8_t __riscv_vrol_vx_u32m8_tumu (vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, size_t rs1, size_t vl); +vuint64m1_t __riscv_vrol_vv_u64m1_tumu (vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, 
size_t vl); +vuint64m1_t __riscv_vrol_vx_u64m1_tumu (vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, size_t rs1, size_t vl); +vuint64m2_t __riscv_vrol_vv_u64m2_tumu (vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl); +vuint64m2_t __riscv_vrol_vx_u64m2_tumu (vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, size_t rs1, size_t vl); +vuint64m4_t __riscv_vrol_vv_u64m4_tumu (vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl); +vuint64m4_t __riscv_vrol_vx_u64m4_tumu (vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, size_t rs1, size_t vl); +vuint64m8_t __riscv_vrol_vv_u64m8_tumu (vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl); +vuint64m8_t __riscv_vrol_vx_u64m8_tumu (vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, size_t rs1, size_t vl); +vuint8mf8_t __riscv_vror_vv_u8mf8_tumu (vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl); +vuint8mf8_t __riscv_vror_vx_u8mf8_tumu (vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, size_t rs1, size_t vl); +vuint8mf4_t __riscv_vror_vv_u8mf4_tumu (vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl); +vuint8mf4_t __riscv_vror_vx_u8mf4_tumu (vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, size_t rs1, size_t vl); +vuint8mf2_t __riscv_vror_vv_u8mf2_tumu (vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl); +vuint8mf2_t __riscv_vror_vx_u8mf2_tumu (vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, size_t rs1, size_t vl); +vuint8m1_t __riscv_vror_vv_u8m1_tumu (vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl); +vuint8m1_t __riscv_vror_vx_u8m1_tumu (vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, size_t rs1, size_t vl); +vuint8m2_t __riscv_vror_vv_u8m2_tumu (vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl); +vuint8m2_t __riscv_vror_vx_u8m2_tumu (vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, size_t rs1, size_t vl); +vuint8m4_t __riscv_vror_vv_u8m4_tumu (vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl); +vuint8m4_t __riscv_vror_vx_u8m4_tumu (vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, size_t rs1, size_t vl); +vuint8m8_t __riscv_vror_vv_u8m8_tumu (vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl); +vuint8m8_t __riscv_vror_vx_u8m8_tumu (vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, size_t rs1, size_t vl); +vuint16mf4_t __riscv_vror_vv_u16mf4_tumu (vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl); +vuint16mf4_t __riscv_vror_vx_u16mf4_tumu (vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, size_t rs1, size_t vl); +vuint16mf2_t __riscv_vror_vv_u16mf2_tumu (vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl); +vuint16mf2_t __riscv_vror_vx_u16mf2_tumu (vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, size_t rs1, size_t vl); +vuint16m1_t __riscv_vror_vv_u16m1_tumu (vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl); +vuint16m1_t __riscv_vror_vx_u16m1_tumu (vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, size_t rs1, size_t vl); +vuint16m2_t __riscv_vror_vv_u16m2_tumu (vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl); +vuint16m2_t __riscv_vror_vx_u16m2_tumu (vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, size_t rs1, size_t vl); +vuint16m4_t __riscv_vror_vv_u16m4_tumu (vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl); +vuint16m4_t __riscv_vror_vx_u16m4_tumu (vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, size_t rs1, size_t vl); +vuint16m8_t 
__riscv_vror_vv_u16m8_tumu (vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl); +vuint16m8_t __riscv_vror_vx_u16m8_tumu (vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, size_t rs1, size_t vl); +vuint32mf2_t __riscv_vror_vv_u32mf2_tumu (vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl); +vuint32mf2_t __riscv_vror_vx_u32mf2_tumu (vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, size_t rs1, size_t vl); +vuint32m1_t __riscv_vror_vv_u32m1_tumu (vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl); +vuint32m1_t __riscv_vror_vx_u32m1_tumu (vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, size_t rs1, size_t vl); +vuint32m2_t __riscv_vror_vv_u32m2_tumu (vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl); +vuint32m2_t __riscv_vror_vx_u32m2_tumu (vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, size_t rs1, size_t vl); +vuint32m4_t __riscv_vror_vv_u32m4_tumu (vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl); +vuint32m4_t __riscv_vror_vx_u32m4_tumu (vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, size_t rs1, size_t vl); +vuint32m8_t __riscv_vror_vv_u32m8_tumu (vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl); +vuint32m8_t __riscv_vror_vx_u32m8_tumu (vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, size_t rs1, size_t vl); +vuint64m1_t __riscv_vror_vv_u64m1_tumu (vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl); +vuint64m1_t __riscv_vror_vx_u64m1_tumu (vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, size_t rs1, size_t vl); +vuint64m2_t __riscv_vror_vv_u64m2_tumu (vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl); +vuint64m2_t __riscv_vror_vx_u64m2_tumu (vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, size_t rs1, size_t vl); +vuint64m4_t __riscv_vror_vv_u64m4_tumu (vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl); +vuint64m4_t __riscv_vror_vx_u64m4_tumu (vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, size_t rs1, size_t vl); +vuint64m8_t __riscv_vror_vv_u64m8_tumu (vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl); +vuint64m8_t __riscv_vror_vx_u64m8_tumu (vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, size_t rs1, size_t vl); +// masked functions +vuint8mf8_t __riscv_vrol_vv_u8mf8_mu (vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl); +vuint8mf8_t __riscv_vrol_vx_u8mf8_mu (vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, size_t rs1, size_t vl); +vuint8mf4_t __riscv_vrol_vv_u8mf4_mu (vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl); +vuint8mf4_t __riscv_vrol_vx_u8mf4_mu (vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, size_t rs1, size_t vl); +vuint8mf2_t __riscv_vrol_vv_u8mf2_mu (vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl); +vuint8mf2_t __riscv_vrol_vx_u8mf2_mu (vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, size_t rs1, size_t vl); +vuint8m1_t __riscv_vrol_vv_u8m1_mu (vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl); +vuint8m1_t __riscv_vrol_vx_u8m1_mu (vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, size_t rs1, size_t vl); +vuint8m2_t __riscv_vrol_vv_u8m2_mu (vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl); +vuint8m2_t __riscv_vrol_vx_u8m2_mu (vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, size_t rs1, size_t vl); +vuint8m4_t __riscv_vrol_vv_u8m4_mu (vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl); +vuint8m4_t __riscv_vrol_vx_u8m4_mu 
(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, size_t rs1, size_t vl); +vuint8m8_t __riscv_vrol_vv_u8m8_mu (vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl); +vuint8m8_t __riscv_vrol_vx_u8m8_mu (vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, size_t rs1, size_t vl); +vuint16mf4_t __riscv_vrol_vv_u16mf4_mu (vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl); +vuint16mf4_t __riscv_vrol_vx_u16mf4_mu (vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, size_t rs1, size_t vl); +vuint16mf2_t __riscv_vrol_vv_u16mf2_mu (vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl); +vuint16mf2_t __riscv_vrol_vx_u16mf2_mu (vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, size_t rs1, size_t vl); +vuint16m1_t __riscv_vrol_vv_u16m1_mu (vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl); +vuint16m1_t __riscv_vrol_vx_u16m1_mu (vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, size_t rs1, size_t vl); +vuint16m2_t __riscv_vrol_vv_u16m2_mu (vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl); +vuint16m2_t __riscv_vrol_vx_u16m2_mu (vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, size_t rs1, size_t vl); +vuint16m4_t __riscv_vrol_vv_u16m4_mu (vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl); +vuint16m4_t __riscv_vrol_vx_u16m4_mu (vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, size_t rs1, size_t vl); +vuint16m8_t __riscv_vrol_vv_u16m8_mu (vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl); +vuint16m8_t __riscv_vrol_vx_u16m8_mu (vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, size_t rs1, size_t vl); +vuint32mf2_t __riscv_vrol_vv_u32mf2_mu (vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl); +vuint32mf2_t __riscv_vrol_vx_u32mf2_mu (vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, size_t rs1, size_t vl); +vuint32m1_t __riscv_vrol_vv_u32m1_mu (vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl); +vuint32m1_t __riscv_vrol_vx_u32m1_mu (vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, size_t rs1, size_t vl); +vuint32m2_t __riscv_vrol_vv_u32m2_mu (vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl); +vuint32m2_t __riscv_vrol_vx_u32m2_mu (vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, size_t rs1, size_t vl); +vuint32m4_t __riscv_vrol_vv_u32m4_mu (vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl); +vuint32m4_t __riscv_vrol_vx_u32m4_mu (vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, size_t rs1, size_t vl); +vuint32m8_t __riscv_vrol_vv_u32m8_mu (vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl); +vuint32m8_t __riscv_vrol_vx_u32m8_mu (vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, size_t rs1, size_t vl); +vuint64m1_t __riscv_vrol_vv_u64m1_mu (vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl); +vuint64m1_t __riscv_vrol_vx_u64m1_mu (vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, size_t rs1, size_t vl); +vuint64m2_t __riscv_vrol_vv_u64m2_mu (vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl); +vuint64m2_t __riscv_vrol_vx_u64m2_mu (vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, size_t rs1, size_t vl); +vuint64m4_t __riscv_vrol_vv_u64m4_mu (vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl); +vuint64m4_t __riscv_vrol_vx_u64m4_mu (vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, size_t rs1, size_t vl); +vuint64m8_t __riscv_vrol_vv_u64m8_mu (vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, 
size_t vl); +vuint64m8_t __riscv_vrol_vx_u64m8_mu (vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, size_t rs1, size_t vl); +vuint8mf8_t __riscv_vror_vv_u8mf8_mu (vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl); +vuint8mf8_t __riscv_vror_vx_u8mf8_mu (vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, size_t rs1, size_t vl); +vuint8mf4_t __riscv_vror_vv_u8mf4_mu (vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl); +vuint8mf4_t __riscv_vror_vx_u8mf4_mu (vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, size_t rs1, size_t vl); +vuint8mf2_t __riscv_vror_vv_u8mf2_mu (vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl); +vuint8mf2_t __riscv_vror_vx_u8mf2_mu (vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, size_t rs1, size_t vl); +vuint8m1_t __riscv_vror_vv_u8m1_mu (vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl); +vuint8m1_t __riscv_vror_vx_u8m1_mu (vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, size_t rs1, size_t vl); +vuint8m2_t __riscv_vror_vv_u8m2_mu (vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl); +vuint8m2_t __riscv_vror_vx_u8m2_mu (vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, size_t rs1, size_t vl); +vuint8m4_t __riscv_vror_vv_u8m4_mu (vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl); +vuint8m4_t __riscv_vror_vx_u8m4_mu (vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, size_t rs1, size_t vl); +vuint8m8_t __riscv_vror_vv_u8m8_mu (vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl); +vuint8m8_t __riscv_vror_vx_u8m8_mu (vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, size_t rs1, size_t vl); +vuint16mf4_t __riscv_vror_vv_u16mf4_mu (vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl); +vuint16mf4_t __riscv_vror_vx_u16mf4_mu (vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, size_t rs1, size_t vl); +vuint16mf2_t __riscv_vror_vv_u16mf2_mu (vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl); +vuint16mf2_t __riscv_vror_vx_u16mf2_mu (vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, size_t rs1, size_t vl); +vuint16m1_t __riscv_vror_vv_u16m1_mu (vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl); +vuint16m1_t __riscv_vror_vx_u16m1_mu (vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, size_t rs1, size_t vl); +vuint16m2_t __riscv_vror_vv_u16m2_mu (vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl); +vuint16m2_t __riscv_vror_vx_u16m2_mu (vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, size_t rs1, size_t vl); +vuint16m4_t __riscv_vror_vv_u16m4_mu (vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl); +vuint16m4_t __riscv_vror_vx_u16m4_mu (vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, size_t rs1, size_t vl); +vuint16m8_t __riscv_vror_vv_u16m8_mu (vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl); +vuint16m8_t __riscv_vror_vx_u16m8_mu (vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, size_t rs1, size_t vl); +vuint32mf2_t __riscv_vror_vv_u32mf2_mu (vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl); +vuint32mf2_t __riscv_vror_vx_u32mf2_mu (vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, size_t rs1, size_t vl); +vuint32m1_t __riscv_vror_vv_u32m1_mu (vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl); +vuint32m1_t __riscv_vror_vx_u32m1_mu (vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, size_t rs1, size_t vl); +vuint32m2_t __riscv_vror_vv_u32m2_mu (vbool16_t vm, vuint32m2_t vd, vuint32m2_t 
vs2, vuint32m2_t vs1, size_t vl); +vuint32m2_t __riscv_vror_vx_u32m2_mu (vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, size_t rs1, size_t vl); +vuint32m4_t __riscv_vror_vv_u32m4_mu (vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl); +vuint32m4_t __riscv_vror_vx_u32m4_mu (vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, size_t rs1, size_t vl); +vuint32m8_t __riscv_vror_vv_u32m8_mu (vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl); +vuint32m8_t __riscv_vror_vx_u32m8_mu (vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, size_t rs1, size_t vl); +vuint64m1_t __riscv_vror_vv_u64m1_mu (vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl); +vuint64m1_t __riscv_vror_vx_u64m1_mu (vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, size_t rs1, size_t vl); +vuint64m2_t __riscv_vror_vv_u64m2_mu (vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl); +vuint64m2_t __riscv_vror_vx_u64m2_mu (vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, size_t rs1, size_t vl); +vuint64m4_t __riscv_vror_vv_u64m4_mu (vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl); +vuint64m4_t __riscv_vror_vx_u64m4_mu (vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, size_t rs1, size_t vl); +vuint64m8_t __riscv_vror_vv_u64m8_mu (vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl); +vuint64m8_t __riscv_vror_vx_u64m8_mu (vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, size_t rs1, size_t vl); +---- + +[[policy-variant-]] +==== Vector Basic Bit-manipulation used - Widening Shift + +[,c] +---- +vuint16mf4_t __riscv_vwsll_vv_u16mf4_tu (vuint16mf4_t vd, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl); +vuint16mf4_t __riscv_vwsll_vx_u16mf4_tu (vuint16mf4_t vd, vuint8mf8_t vs2, size_t rs1, size_t vl); +vuint16mf2_t __riscv_vwsll_vv_u16mf2_tu (vuint16mf2_t vd, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl); +vuint16mf2_t __riscv_vwsll_vx_u16mf2_tu (vuint16mf2_t vd, vuint8mf4_t vs2, size_t rs1, size_t vl); +vuint16m1_t __riscv_vwsll_vv_u16m1_tu (vuint16m1_t vd, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl); +vuint16m1_t __riscv_vwsll_vx_u16m1_tu (vuint16m1_t vd, vuint8mf2_t vs2, size_t rs1, size_t vl); +vuint16m2_t __riscv_vwsll_vv_u16m2_tu (vuint16m2_t vd, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl); +vuint16m2_t __riscv_vwsll_vx_u16m2_tu (vuint16m2_t vd, vuint8m1_t vs2, size_t rs1, size_t vl); +vuint16m4_t __riscv_vwsll_vv_u16m4_tu (vuint16m4_t vd, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl); +vuint16m4_t __riscv_vwsll_vx_u16m4_tu (vuint16m4_t vd, vuint8m2_t vs2, size_t rs1, size_t vl); +vuint16m8_t __riscv_vwsll_vv_u16m8_tu (vuint16m8_t vd, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl); +vuint16m8_t __riscv_vwsll_vx_u16m8_tu (vuint16m8_t vd, vuint8m4_t vs2, size_t rs1, size_t vl); +vuint32mf2_t __riscv_vwsll_vv_u32mf2_tu (vuint32mf2_t vd, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl); +vuint32mf2_t __riscv_vwsll_vx_u32mf2_tu (vuint32mf2_t vd, vuint16mf4_t vs2, size_t rs1, size_t vl); +vuint32m1_t __riscv_vwsll_vv_u32m1_tu (vuint32m1_t vd, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl); +vuint32m1_t __riscv_vwsll_vx_u32m1_tu (vuint32m1_t vd, vuint16mf2_t vs2, size_t rs1, size_t vl); +vuint32m2_t __riscv_vwsll_vv_u32m2_tu (vuint32m2_t vd, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl); +vuint32m2_t __riscv_vwsll_vx_u32m2_tu (vuint32m2_t vd, vuint16m1_t vs2, size_t rs1, size_t vl); +vuint32m4_t __riscv_vwsll_vv_u32m4_tu (vuint32m4_t vd, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl); +vuint32m4_t __riscv_vwsll_vx_u32m4_tu (vuint32m4_t vd, 
vuint16m2_t vs2, size_t rs1, size_t vl); +vuint32m8_t __riscv_vwsll_vv_u32m8_tu (vuint32m8_t vd, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl); +vuint32m8_t __riscv_vwsll_vx_u32m8_tu (vuint32m8_t vd, vuint16m4_t vs2, size_t rs1, size_t vl); +vuint64m1_t __riscv_vwsll_vv_u64m1_tu (vuint64m1_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl); +vuint64m1_t __riscv_vwsll_vx_u64m1_tu (vuint64m1_t vd, vuint32mf2_t vs2, size_t rs1, size_t vl); +vuint64m2_t __riscv_vwsll_vv_u64m2_tu (vuint64m2_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl); +vuint64m2_t __riscv_vwsll_vx_u64m2_tu (vuint64m2_t vd, vuint32m1_t vs2, size_t rs1, size_t vl); +vuint64m4_t __riscv_vwsll_vv_u64m4_tu (vuint64m4_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl); +vuint64m4_t __riscv_vwsll_vx_u64m4_tu (vuint64m4_t vd, vuint32m2_t vs2, size_t rs1, size_t vl); +vuint64m8_t __riscv_vwsll_vv_u64m8_tu (vuint64m8_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl); +vuint64m8_t __riscv_vwsll_vx_u64m8_tu (vuint64m8_t vd, vuint32m4_t vs2, size_t rs1, size_t vl); +// masked functions +vuint16mf4_t __riscv_vwsll_vv_u16mf4_tum (vbool64_t vm, vuint16mf4_t vd, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl); +vuint16mf4_t __riscv_vwsll_vx_u16mf4_tum (vbool64_t vm, vuint16mf4_t vd, vuint8mf8_t vs2, size_t rs1, size_t vl); +vuint16mf2_t __riscv_vwsll_vv_u16mf2_tum (vbool32_t vm, vuint16mf2_t vd, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl); +vuint16mf2_t __riscv_vwsll_vx_u16mf2_tum (vbool32_t vm, vuint16mf2_t vd, vuint8mf4_t vs2, size_t rs1, size_t vl); +vuint16m1_t __riscv_vwsll_vv_u16m1_tum (vbool16_t vm, vuint16m1_t vd, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl); +vuint16m1_t __riscv_vwsll_vx_u16m1_tum (vbool16_t vm, vuint16m1_t vd, vuint8mf2_t vs2, size_t rs1, size_t vl); +vuint16m2_t __riscv_vwsll_vv_u16m2_tum (vbool8_t vm, vuint16m2_t vd, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl); +vuint16m2_t __riscv_vwsll_vx_u16m2_tum (vbool8_t vm, vuint16m2_t vd, vuint8m1_t vs2, size_t rs1, size_t vl); +vuint16m4_t __riscv_vwsll_vv_u16m4_tum (vbool4_t vm, vuint16m4_t vd, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl); +vuint16m4_t __riscv_vwsll_vx_u16m4_tum (vbool4_t vm, vuint16m4_t vd, vuint8m2_t vs2, size_t rs1, size_t vl); +vuint16m8_t __riscv_vwsll_vv_u16m8_tum (vbool2_t vm, vuint16m8_t vd, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl); +vuint16m8_t __riscv_vwsll_vx_u16m8_tum (vbool2_t vm, vuint16m8_t vd, vuint8m4_t vs2, size_t rs1, size_t vl); +vuint32mf2_t __riscv_vwsll_vv_u32mf2_tum (vbool64_t vm, vuint32mf2_t vd, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl); +vuint32mf2_t __riscv_vwsll_vx_u32mf2_tum (vbool64_t vm, vuint32mf2_t vd, vuint16mf4_t vs2, size_t rs1, size_t vl); +vuint32m1_t __riscv_vwsll_vv_u32m1_tum (vbool32_t vm, vuint32m1_t vd, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl); +vuint32m1_t __riscv_vwsll_vx_u32m1_tum (vbool32_t vm, vuint32m1_t vd, vuint16mf2_t vs2, size_t rs1, size_t vl); +vuint32m2_t __riscv_vwsll_vv_u32m2_tum (vbool16_t vm, vuint32m2_t vd, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl); +vuint32m2_t __riscv_vwsll_vx_u32m2_tum (vbool16_t vm, vuint32m2_t vd, vuint16m1_t vs2, size_t rs1, size_t vl); +vuint32m4_t __riscv_vwsll_vv_u32m4_tum (vbool8_t vm, vuint32m4_t vd, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl); +vuint32m4_t __riscv_vwsll_vx_u32m4_tum (vbool8_t vm, vuint32m4_t vd, vuint16m2_t vs2, size_t rs1, size_t vl); +vuint32m8_t __riscv_vwsll_vv_u32m8_tum (vbool4_t vm, vuint32m8_t vd, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl); +vuint32m8_t __riscv_vwsll_vx_u32m8_tum (vbool4_t vm, vuint32m8_t vd, vuint16m4_t 
vs2, size_t rs1, size_t vl); +vuint64m1_t __riscv_vwsll_vv_u64m1_tum (vbool64_t vm, vuint64m1_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl); +vuint64m1_t __riscv_vwsll_vx_u64m1_tum (vbool64_t vm, vuint64m1_t vd, vuint32mf2_t vs2, size_t rs1, size_t vl); +vuint64m2_t __riscv_vwsll_vv_u64m2_tum (vbool32_t vm, vuint64m2_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl); +vuint64m2_t __riscv_vwsll_vx_u64m2_tum (vbool32_t vm, vuint64m2_t vd, vuint32m1_t vs2, size_t rs1, size_t vl); +vuint64m4_t __riscv_vwsll_vv_u64m4_tum (vbool16_t vm, vuint64m4_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl); +vuint64m4_t __riscv_vwsll_vx_u64m4_tum (vbool16_t vm, vuint64m4_t vd, vuint32m2_t vs2, size_t rs1, size_t vl); +vuint64m8_t __riscv_vwsll_vv_u64m8_tum (vbool8_t vm, vuint64m8_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl); +vuint64m8_t __riscv_vwsll_vx_u64m8_tum (vbool8_t vm, vuint64m8_t vd, vuint32m4_t vs2, size_t rs1, size_t vl); +// masked functions +vuint16mf4_t __riscv_vwsll_vv_u16mf4_tumu (vbool64_t vm, vuint16mf4_t vd, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl); +vuint16mf4_t __riscv_vwsll_vx_u16mf4_tumu (vbool64_t vm, vuint16mf4_t vd, vuint8mf8_t vs2, size_t rs1, size_t vl); +vuint16mf2_t __riscv_vwsll_vv_u16mf2_tumu (vbool32_t vm, vuint16mf2_t vd, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl); +vuint16mf2_t __riscv_vwsll_vx_u16mf2_tumu (vbool32_t vm, vuint16mf2_t vd, vuint8mf4_t vs2, size_t rs1, size_t vl); +vuint16m1_t __riscv_vwsll_vv_u16m1_tumu (vbool16_t vm, vuint16m1_t vd, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl); +vuint16m1_t __riscv_vwsll_vx_u16m1_tumu (vbool16_t vm, vuint16m1_t vd, vuint8mf2_t vs2, size_t rs1, size_t vl); +vuint16m2_t __riscv_vwsll_vv_u16m2_tumu (vbool8_t vm, vuint16m2_t vd, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl); +vuint16m2_t __riscv_vwsll_vx_u16m2_tumu (vbool8_t vm, vuint16m2_t vd, vuint8m1_t vs2, size_t rs1, size_t vl); +vuint16m4_t __riscv_vwsll_vv_u16m4_tumu (vbool4_t vm, vuint16m4_t vd, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl); +vuint16m4_t __riscv_vwsll_vx_u16m4_tumu (vbool4_t vm, vuint16m4_t vd, vuint8m2_t vs2, size_t rs1, size_t vl); +vuint16m8_t __riscv_vwsll_vv_u16m8_tumu (vbool2_t vm, vuint16m8_t vd, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl); +vuint16m8_t __riscv_vwsll_vx_u16m8_tumu (vbool2_t vm, vuint16m8_t vd, vuint8m4_t vs2, size_t rs1, size_t vl); +vuint32mf2_t __riscv_vwsll_vv_u32mf2_tumu (vbool64_t vm, vuint32mf2_t vd, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl); +vuint32mf2_t __riscv_vwsll_vx_u32mf2_tumu (vbool64_t vm, vuint32mf2_t vd, vuint16mf4_t vs2, size_t rs1, size_t vl); +vuint32m1_t __riscv_vwsll_vv_u32m1_tumu (vbool32_t vm, vuint32m1_t vd, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl); +vuint32m1_t __riscv_vwsll_vx_u32m1_tumu (vbool32_t vm, vuint32m1_t vd, vuint16mf2_t vs2, size_t rs1, size_t vl); +vuint32m2_t __riscv_vwsll_vv_u32m2_tumu (vbool16_t vm, vuint32m2_t vd, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl); +vuint32m2_t __riscv_vwsll_vx_u32m2_tumu (vbool16_t vm, vuint32m2_t vd, vuint16m1_t vs2, size_t rs1, size_t vl); +vuint32m4_t __riscv_vwsll_vv_u32m4_tumu (vbool8_t vm, vuint32m4_t vd, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl); +vuint32m4_t __riscv_vwsll_vx_u32m4_tumu (vbool8_t vm, vuint32m4_t vd, vuint16m2_t vs2, size_t rs1, size_t vl); +vuint32m8_t __riscv_vwsll_vv_u32m8_tumu (vbool4_t vm, vuint32m8_t vd, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl); +vuint32m8_t __riscv_vwsll_vx_u32m8_tumu (vbool4_t vm, vuint32m8_t vd, vuint16m4_t vs2, size_t rs1, size_t vl); +vuint64m1_t __riscv_vwsll_vv_u64m1_tumu 
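+// ---------------------------------------------------------------------------
+// Illustrative usage sketch (not part of the generated listing; the wrapper
+// name is hypothetical, and <riscv_vector.h> plus Zvbb are assumed). vwsll
+// widens: 16-bit source elements are zero-extended and shifted left into a
+// 32-bit destination; _tumu keeps tail and masked-off elements of vd
+// undisturbed.
+vuint32m1_t example_vwsll_u32m1_tumu(vbool32_t vm, vuint32m1_t vd,
+                                     vuint16mf2_t vs2, size_t rs1, size_t vl) {
+  return __riscv_vwsll_vx_u32m1_tumu(vm, vd, vs2, rs1, vl);
+}
+// ---------------------------------------------------------------------------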
(vbool64_t vm, vuint64m1_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl); +vuint64m1_t __riscv_vwsll_vx_u64m1_tumu (vbool64_t vm, vuint64m1_t vd, vuint32mf2_t vs2, size_t rs1, size_t vl); +vuint64m2_t __riscv_vwsll_vv_u64m2_tumu (vbool32_t vm, vuint64m2_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl); +vuint64m2_t __riscv_vwsll_vx_u64m2_tumu (vbool32_t vm, vuint64m2_t vd, vuint32m1_t vs2, size_t rs1, size_t vl); +vuint64m4_t __riscv_vwsll_vv_u64m4_tumu (vbool16_t vm, vuint64m4_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl); +vuint64m4_t __riscv_vwsll_vx_u64m4_tumu (vbool16_t vm, vuint64m4_t vd, vuint32m2_t vs2, size_t rs1, size_t vl); +vuint64m8_t __riscv_vwsll_vv_u64m8_tumu (vbool8_t vm, vuint64m8_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl); +vuint64m8_t __riscv_vwsll_vx_u64m8_tumu (vbool8_t vm, vuint64m8_t vd, vuint32m4_t vs2, size_t rs1, size_t vl); +// masked functions +vuint16mf4_t __riscv_vwsll_vv_u16mf4_mu (vbool64_t vm, vuint16mf4_t vd, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl); +vuint16mf4_t __riscv_vwsll_vx_u16mf4_mu (vbool64_t vm, vuint16mf4_t vd, vuint8mf8_t vs2, size_t rs1, size_t vl); +vuint16mf2_t __riscv_vwsll_vv_u16mf2_mu (vbool32_t vm, vuint16mf2_t vd, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl); +vuint16mf2_t __riscv_vwsll_vx_u16mf2_mu (vbool32_t vm, vuint16mf2_t vd, vuint8mf4_t vs2, size_t rs1, size_t vl); +vuint16m1_t __riscv_vwsll_vv_u16m1_mu (vbool16_t vm, vuint16m1_t vd, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl); +vuint16m1_t __riscv_vwsll_vx_u16m1_mu (vbool16_t vm, vuint16m1_t vd, vuint8mf2_t vs2, size_t rs1, size_t vl); +vuint16m2_t __riscv_vwsll_vv_u16m2_mu (vbool8_t vm, vuint16m2_t vd, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl); +vuint16m2_t __riscv_vwsll_vx_u16m2_mu (vbool8_t vm, vuint16m2_t vd, vuint8m1_t vs2, size_t rs1, size_t vl); +vuint16m4_t __riscv_vwsll_vv_u16m4_mu (vbool4_t vm, vuint16m4_t vd, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl); +vuint16m4_t __riscv_vwsll_vx_u16m4_mu (vbool4_t vm, vuint16m4_t vd, vuint8m2_t vs2, size_t rs1, size_t vl); +vuint16m8_t __riscv_vwsll_vv_u16m8_mu (vbool2_t vm, vuint16m8_t vd, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl); +vuint16m8_t __riscv_vwsll_vx_u16m8_mu (vbool2_t vm, vuint16m8_t vd, vuint8m4_t vs2, size_t rs1, size_t vl); +vuint32mf2_t __riscv_vwsll_vv_u32mf2_mu (vbool64_t vm, vuint32mf2_t vd, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl); +vuint32mf2_t __riscv_vwsll_vx_u32mf2_mu (vbool64_t vm, vuint32mf2_t vd, vuint16mf4_t vs2, size_t rs1, size_t vl); +vuint32m1_t __riscv_vwsll_vv_u32m1_mu (vbool32_t vm, vuint32m1_t vd, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl); +vuint32m1_t __riscv_vwsll_vx_u32m1_mu (vbool32_t vm, vuint32m1_t vd, vuint16mf2_t vs2, size_t rs1, size_t vl); +vuint32m2_t __riscv_vwsll_vv_u32m2_mu (vbool16_t vm, vuint32m2_t vd, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl); +vuint32m2_t __riscv_vwsll_vx_u32m2_mu (vbool16_t vm, vuint32m2_t vd, vuint16m1_t vs2, size_t rs1, size_t vl); +vuint32m4_t __riscv_vwsll_vv_u32m4_mu (vbool8_t vm, vuint32m4_t vd, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl); +vuint32m4_t __riscv_vwsll_vx_u32m4_mu (vbool8_t vm, vuint32m4_t vd, vuint16m2_t vs2, size_t rs1, size_t vl); +vuint32m8_t __riscv_vwsll_vv_u32m8_mu (vbool4_t vm, vuint32m8_t vd, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl); +vuint32m8_t __riscv_vwsll_vx_u32m8_mu (vbool4_t vm, vuint32m8_t vd, vuint16m4_t vs2, size_t rs1, size_t vl); +vuint64m1_t __riscv_vwsll_vv_u64m1_mu (vbool64_t vm, vuint64m1_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl); +vuint64m1_t 
__riscv_vwsll_vx_u64m1_mu (vbool64_t vm, vuint64m1_t vd, vuint32mf2_t vs2, size_t rs1, size_t vl); +vuint64m2_t __riscv_vwsll_vv_u64m2_mu (vbool32_t vm, vuint64m2_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl); +vuint64m2_t __riscv_vwsll_vx_u64m2_mu (vbool32_t vm, vuint64m2_t vd, vuint32m1_t vs2, size_t rs1, size_t vl); +vuint64m4_t __riscv_vwsll_vv_u64m4_mu (vbool16_t vm, vuint64m4_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl); +vuint64m4_t __riscv_vwsll_vx_u64m4_mu (vbool16_t vm, vuint64m4_t vd, vuint32m2_t vs2, size_t rs1, size_t vl); +vuint64m8_t __riscv_vwsll_vv_u64m8_mu (vbool8_t vm, vuint64m8_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl); +vuint64m8_t __riscv_vwsll_vx_u64m8_mu (vbool8_t vm, vuint64m8_t vd, vuint32m4_t vs2, size_t rs1, size_t vl); +---- + +=== Zvbc - Vector Carryless Multiplication + +[[policy-variant-]] +==== Vector Carryless Multiplication + +[,c] +---- +vuint64m1_t __riscv_vclmul_vv_u64m1_tu (vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl); +vuint64m1_t __riscv_vclmul_vx_u64m1_tu (vuint64m1_t vd, vuint64m1_t vs2, uint64_t rs1, size_t vl); +vuint64m2_t __riscv_vclmul_vv_u64m2_tu (vuint64m2_t vd, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl); +vuint64m2_t __riscv_vclmul_vx_u64m2_tu (vuint64m2_t vd, vuint64m2_t vs2, uint64_t rs1, size_t vl); +vuint64m4_t __riscv_vclmul_vv_u64m4_tu (vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl); +vuint64m4_t __riscv_vclmul_vx_u64m4_tu (vuint64m4_t vd, vuint64m4_t vs2, uint64_t rs1, size_t vl); +vuint64m8_t __riscv_vclmul_vv_u64m8_tu (vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl); +vuint64m8_t __riscv_vclmul_vx_u64m8_tu (vuint64m8_t vd, vuint64m8_t vs2, uint64_t rs1, size_t vl); +vuint64m1_t __riscv_vclmulh_vv_u64m1_tu (vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl); +vuint64m1_t __riscv_vclmulh_vx_u64m1_tu (vuint64m1_t vd, vuint64m1_t vs2, uint64_t rs1, size_t vl); +vuint64m2_t __riscv_vclmulh_vv_u64m2_tu (vuint64m2_t vd, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl); +vuint64m2_t __riscv_vclmulh_vx_u64m2_tu (vuint64m2_t vd, vuint64m2_t vs2, uint64_t rs1, size_t vl); +vuint64m4_t __riscv_vclmulh_vv_u64m4_tu (vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl); +vuint64m4_t __riscv_vclmulh_vx_u64m4_tu (vuint64m4_t vd, vuint64m4_t vs2, uint64_t rs1, size_t vl); +vuint64m8_t __riscv_vclmulh_vv_u64m8_tu (vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl); +vuint64m8_t __riscv_vclmulh_vx_u64m8_tu (vuint64m8_t vd, vuint64m8_t vs2, uint64_t rs1, size_t vl); +// masked functions +vuint64m1_t __riscv_vclmul_vv_u64m1_tum (vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl); +vuint64m1_t __riscv_vclmul_vx_u64m1_tum (vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, uint64_t rs1, size_t vl); +vuint64m2_t __riscv_vclmul_vv_u64m2_tum (vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl); +vuint64m2_t __riscv_vclmul_vx_u64m2_tum (vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, uint64_t rs1, size_t vl); +vuint64m4_t __riscv_vclmul_vv_u64m4_tum (vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl); +vuint64m4_t __riscv_vclmul_vx_u64m4_tum (vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, uint64_t rs1, size_t vl); +vuint64m8_t __riscv_vclmul_vv_u64m8_tum (vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl); +vuint64m8_t __riscv_vclmul_vx_u64m8_tum (vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, uint64_t rs1, size_t vl); +vuint64m1_t __riscv_vclmulh_vv_u64m1_tum (vbool64_t vm, 
vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl); +vuint64m1_t __riscv_vclmulh_vx_u64m1_tum (vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, uint64_t rs1, size_t vl); +vuint64m2_t __riscv_vclmulh_vv_u64m2_tum (vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl); +vuint64m2_t __riscv_vclmulh_vx_u64m2_tum (vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, uint64_t rs1, size_t vl); +vuint64m4_t __riscv_vclmulh_vv_u64m4_tum (vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl); +vuint64m4_t __riscv_vclmulh_vx_u64m4_tum (vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, uint64_t rs1, size_t vl); +vuint64m8_t __riscv_vclmulh_vv_u64m8_tum (vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl); +vuint64m8_t __riscv_vclmulh_vx_u64m8_tum (vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, uint64_t rs1, size_t vl); +// masked functions +vuint64m1_t __riscv_vclmul_vv_u64m1_tumu (vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl); +vuint64m1_t __riscv_vclmul_vx_u64m1_tumu (vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, uint64_t rs1, size_t vl); +vuint64m2_t __riscv_vclmul_vv_u64m2_tumu (vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl); +vuint64m2_t __riscv_vclmul_vx_u64m2_tumu (vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, uint64_t rs1, size_t vl); +vuint64m4_t __riscv_vclmul_vv_u64m4_tumu (vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl); +vuint64m4_t __riscv_vclmul_vx_u64m4_tumu (vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, uint64_t rs1, size_t vl); +vuint64m8_t __riscv_vclmul_vv_u64m8_tumu (vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl); +vuint64m8_t __riscv_vclmul_vx_u64m8_tumu (vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, uint64_t rs1, size_t vl); +vuint64m1_t __riscv_vclmulh_vv_u64m1_tumu (vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl); +vuint64m1_t __riscv_vclmulh_vx_u64m1_tumu (vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, uint64_t rs1, size_t vl); +vuint64m2_t __riscv_vclmulh_vv_u64m2_tumu (vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl); +vuint64m2_t __riscv_vclmulh_vx_u64m2_tumu (vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, uint64_t rs1, size_t vl); +vuint64m4_t __riscv_vclmulh_vv_u64m4_tumu (vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl); +vuint64m4_t __riscv_vclmulh_vx_u64m4_tumu (vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, uint64_t rs1, size_t vl); +vuint64m8_t __riscv_vclmulh_vv_u64m8_tumu (vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl); +vuint64m8_t __riscv_vclmulh_vx_u64m8_tumu (vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, uint64_t rs1, size_t vl); +// masked functions +vuint64m1_t __riscv_vclmul_vv_u64m1_mu (vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl); +vuint64m1_t __riscv_vclmul_vx_u64m1_mu (vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, uint64_t rs1, size_t vl); +vuint64m2_t __riscv_vclmul_vv_u64m2_mu (vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl); +vuint64m2_t __riscv_vclmul_vx_u64m2_mu (vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, uint64_t rs1, size_t vl); +vuint64m4_t __riscv_vclmul_vv_u64m4_mu (vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl); +vuint64m4_t __riscv_vclmul_vx_u64m4_mu (vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, uint64_t rs1, size_t vl); +vuint64m8_t __riscv_vclmul_vv_u64m8_mu (vbool8_t vm, 
vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl); +vuint64m8_t __riscv_vclmul_vx_u64m8_mu (vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, uint64_t rs1, size_t vl); +vuint64m1_t __riscv_vclmulh_vv_u64m1_mu (vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl); +vuint64m1_t __riscv_vclmulh_vx_u64m1_mu (vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, uint64_t rs1, size_t vl); +vuint64m2_t __riscv_vclmulh_vv_u64m2_mu (vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl); +vuint64m2_t __riscv_vclmulh_vx_u64m2_mu (vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, uint64_t rs1, size_t vl); +vuint64m4_t __riscv_vclmulh_vv_u64m4_mu (vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl); +vuint64m4_t __riscv_vclmulh_vx_u64m4_mu (vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, uint64_t rs1, size_t vl); +vuint64m8_t __riscv_vclmulh_vv_u64m8_mu (vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl); +vuint64m8_t __riscv_vclmulh_vx_u64m8_mu (vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, uint64_t rs1, size_t vl); +---- + +=== Zvkg - Vector GCM/GMAC + +[[policy-variant-]] +==== Vector GCM/GMAC + +[,c] +---- +vuint32mf2_t __riscv_vghsh_vv_u32mf2_tu (vuint32mf2_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl); +vuint32m1_t __riscv_vghsh_vv_u32m1_tu (vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl); +vuint32m2_t __riscv_vghsh_vv_u32m2_tu (vuint32m2_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl); +vuint32m4_t __riscv_vghsh_vv_u32m4_tu (vuint32m4_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl); +vuint32m8_t __riscv_vghsh_vv_u32m8_tu (vuint32m8_t vd, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl); +vuint32mf2_t __riscv_vgmul_vv_u32mf2_tu (vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vgmul_vv_u32m1_tu (vuint32m1_t vd, vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vgmul_vv_u32m2_tu (vuint32m2_t vd, vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vgmul_vv_u32m4_tu (vuint32m4_t vd, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vgmul_vv_u32m8_tu (vuint32m8_t vd, vuint32m8_t vs2, size_t vl); +---- + +=== Zvkned - NIST Suite: Vector AES Block Cipher + +[[policy-variant-]] +==== Vector AES Encryption + +[,c] +---- +vuint32mf2_t __riscv_vaesef_vv_u32mf2_tu (vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); +vuint32mf2_t __riscv_vaesef_vs_u32mf2_u32mf2_tu (vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vaesef_vs_u32mf2_u32m1_tu (vuint32m1_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m2_t __riscv_vaesef_vs_u32mf2_u32m2_tu (vuint32m2_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m4_t __riscv_vaesef_vs_u32mf2_u32m4_tu (vuint32m4_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m8_t __riscv_vaesef_vs_u32mf2_u32m8_tu (vuint32m8_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vaesef_vv_u32m1_tu (vuint32m1_t vd, vuint32m1_t vs2, size_t vl); +vuint32m1_t __riscv_vaesef_vs_u32m1_u32m1_tu (vuint32m1_t vd, vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vaesef_vs_u32m1_u32m2_tu (vuint32m2_t vd, vuint32m1_t vs2, size_t vl); +vuint32m4_t __riscv_vaesef_vs_u32m1_u32m4_tu (vuint32m4_t vd, vuint32m1_t vs2, size_t vl); +vuint32m8_t __riscv_vaesef_vs_u32m1_u32m8_tu (vuint32m8_t vd, vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vaesef_vv_u32m2_tu (vuint32m2_t vd, vuint32m2_t vs2, size_t vl); +vuint32m2_t __riscv_vaesef_vs_u32m2_u32m2_tu (vuint32m2_t vd, vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vaesef_vs_u32m2_u32m4_tu (vuint32m4_t vd, vuint32m2_t vs2, size_t vl); 
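+// Illustrative usage sketch (assumption, not part of the generated listing;
+// requires <riscv_vector.h> and the Zvkned extension): one AES final-round
+// encryption step with the tail-undisturbed policy, calling
+// __riscv_vaesef_vv_u32m1_tu exactly as declared above. vd carries the AES
+// round state and vs2 the round key; the wrapper name is hypothetical.
+static inline vuint32m1_t example_aes_final_round_tu(vuint32m1_t state,
+                                                     vuint32m1_t round_key,
+                                                     size_t vl) {
+  return __riscv_vaesef_vv_u32m1_tu(state, round_key, vl);
+}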
+vuint32m8_t __riscv_vaesef_vs_u32m2_u32m8_tu (vuint32m8_t vd, vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vaesef_vv_u32m4_tu (vuint32m4_t vd, vuint32m4_t vs2, size_t vl); +vuint32m4_t __riscv_vaesef_vs_u32m4_u32m4_tu (vuint32m4_t vd, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vaesef_vs_u32m4_u32m8_tu (vuint32m8_t vd, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vaesef_vv_u32m8_tu (vuint32m8_t vd, vuint32m8_t vs2, size_t vl); +vuint32mf2_t __riscv_vaesem_vv_u32mf2_tu (vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); +vuint32mf2_t __riscv_vaesem_vs_u32mf2_u32mf2_tu (vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vaesem_vs_u32mf2_u32m1_tu (vuint32m1_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m2_t __riscv_vaesem_vs_u32mf2_u32m2_tu (vuint32m2_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m4_t __riscv_vaesem_vs_u32mf2_u32m4_tu (vuint32m4_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m8_t __riscv_vaesem_vs_u32mf2_u32m8_tu (vuint32m8_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vaesem_vv_u32m1_tu (vuint32m1_t vd, vuint32m1_t vs2, size_t vl); +vuint32m1_t __riscv_vaesem_vs_u32m1_u32m1_tu (vuint32m1_t vd, vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vaesem_vs_u32m1_u32m2_tu (vuint32m2_t vd, vuint32m1_t vs2, size_t vl); +vuint32m4_t __riscv_vaesem_vs_u32m1_u32m4_tu (vuint32m4_t vd, vuint32m1_t vs2, size_t vl); +vuint32m8_t __riscv_vaesem_vs_u32m1_u32m8_tu (vuint32m8_t vd, vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vaesem_vv_u32m2_tu (vuint32m2_t vd, vuint32m2_t vs2, size_t vl); +vuint32m2_t __riscv_vaesem_vs_u32m2_u32m2_tu (vuint32m2_t vd, vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vaesem_vs_u32m2_u32m4_tu (vuint32m4_t vd, vuint32m2_t vs2, size_t vl); +vuint32m8_t __riscv_vaesem_vs_u32m2_u32m8_tu (vuint32m8_t vd, vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vaesem_vv_u32m4_tu (vuint32m4_t vd, vuint32m4_t vs2, size_t vl); +vuint32m4_t __riscv_vaesem_vs_u32m4_u32m4_tu (vuint32m4_t vd, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vaesem_vs_u32m4_u32m8_tu (vuint32m8_t vd, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vaesem_vv_u32m8_tu (vuint32m8_t vd, vuint32m8_t vs2, size_t vl); +---- + +[[policy-variant-]] +==== Vector AES Decryption + +[,c] +---- +vuint32mf2_t __riscv_vaesdf_vv_u32mf2_tu (vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); +vuint32mf2_t __riscv_vaesdf_vs_u32mf2_u32mf2_tu (vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vaesdf_vs_u32mf2_u32m1_tu (vuint32m1_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m2_t __riscv_vaesdf_vs_u32mf2_u32m2_tu (vuint32m2_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m4_t __riscv_vaesdf_vs_u32mf2_u32m4_tu (vuint32m4_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m8_t __riscv_vaesdf_vs_u32mf2_u32m8_tu (vuint32m8_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vaesdf_vv_u32m1_tu (vuint32m1_t vd, vuint32m1_t vs2, size_t vl); +vuint32m1_t __riscv_vaesdf_vs_u32m1_u32m1_tu (vuint32m1_t vd, vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vaesdf_vs_u32m1_u32m2_tu (vuint32m2_t vd, vuint32m1_t vs2, size_t vl); +vuint32m4_t __riscv_vaesdf_vs_u32m1_u32m4_tu (vuint32m4_t vd, vuint32m1_t vs2, size_t vl); +vuint32m8_t __riscv_vaesdf_vs_u32m1_u32m8_tu (vuint32m8_t vd, vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vaesdf_vv_u32m2_tu (vuint32m2_t vd, vuint32m2_t vs2, size_t vl); +vuint32m2_t __riscv_vaesdf_vs_u32m2_u32m2_tu (vuint32m2_t vd, vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vaesdf_vs_u32m2_u32m4_tu (vuint32m4_t vd, vuint32m2_t vs2, size_t vl); +vuint32m8_t 
__riscv_vaesdf_vs_u32m2_u32m8_tu (vuint32m8_t vd, vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vaesdf_vv_u32m4_tu (vuint32m4_t vd, vuint32m4_t vs2, size_t vl); +vuint32m4_t __riscv_vaesdf_vs_u32m4_u32m4_tu (vuint32m4_t vd, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vaesdf_vs_u32m4_u32m8_tu (vuint32m8_t vd, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vaesdf_vv_u32m8_tu (vuint32m8_t vd, vuint32m8_t vs2, size_t vl); +vuint32mf2_t __riscv_vaesdm_vv_u32mf2_tu (vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); +vuint32mf2_t __riscv_vaesdm_vs_u32mf2_u32mf2_tu (vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vaesdm_vs_u32mf2_u32m1_tu (vuint32m1_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m2_t __riscv_vaesdm_vs_u32mf2_u32m2_tu (vuint32m2_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m4_t __riscv_vaesdm_vs_u32mf2_u32m4_tu (vuint32m4_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m8_t __riscv_vaesdm_vs_u32mf2_u32m8_tu (vuint32m8_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vaesdm_vv_u32m1_tu (vuint32m1_t vd, vuint32m1_t vs2, size_t vl); +vuint32m1_t __riscv_vaesdm_vs_u32m1_u32m1_tu (vuint32m1_t vd, vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vaesdm_vs_u32m1_u32m2_tu (vuint32m2_t vd, vuint32m1_t vs2, size_t vl); +vuint32m4_t __riscv_vaesdm_vs_u32m1_u32m4_tu (vuint32m4_t vd, vuint32m1_t vs2, size_t vl); +vuint32m8_t __riscv_vaesdm_vs_u32m1_u32m8_tu (vuint32m8_t vd, vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vaesdm_vv_u32m2_tu (vuint32m2_t vd, vuint32m2_t vs2, size_t vl); +vuint32m2_t __riscv_vaesdm_vs_u32m2_u32m2_tu (vuint32m2_t vd, vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vaesdm_vs_u32m2_u32m4_tu (vuint32m4_t vd, vuint32m2_t vs2, size_t vl); +vuint32m8_t __riscv_vaesdm_vs_u32m2_u32m8_tu (vuint32m8_t vd, vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vaesdm_vv_u32m4_tu (vuint32m4_t vd, vuint32m4_t vs2, size_t vl); +vuint32m4_t __riscv_vaesdm_vs_u32m4_u32m4_tu (vuint32m4_t vd, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vaesdm_vs_u32m4_u32m8_tu (vuint32m8_t vd, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vaesdm_vv_u32m8_tu (vuint32m8_t vd, vuint32m8_t vs2, size_t vl); +---- + +[[policy-variant-]] +==== Vector AES-128 Forward KeySchedule generation + +[,c] +---- +vuint32mf2_t __riscv_vaeskf1_vi_u32mf2_tu (vuint32mf2_t vd, vuint32mf2_t vs2, size_t uimm, size_t vl); +vuint32m1_t __riscv_vaeskf1_vi_u32m1_tu (vuint32m1_t vd, vuint32m1_t vs2, size_t uimm, size_t vl); +vuint32m2_t __riscv_vaeskf1_vi_u32m2_tu (vuint32m2_t vd, vuint32m2_t vs2, size_t uimm, size_t vl); +vuint32m4_t __riscv_vaeskf1_vi_u32m4_tu (vuint32m4_t vd, vuint32m4_t vs2, size_t uimm, size_t vl); +vuint32m8_t __riscv_vaeskf1_vi_u32m8_tu (vuint32m8_t vd, vuint32m8_t vs2, size_t uimm, size_t vl); +vuint32mf2_t __riscv_vaeskf2_vi_u32mf2_tu (vuint32mf2_t vd, vuint32mf2_t vs2, size_t uimm, size_t vl); +vuint32m1_t __riscv_vaeskf2_vi_u32m1_tu (vuint32m1_t vd, vuint32m1_t vs2, size_t uimm, size_t vl); +vuint32m2_t __riscv_vaeskf2_vi_u32m2_tu (vuint32m2_t vd, vuint32m2_t vs2, size_t uimm, size_t vl); +vuint32m4_t __riscv_vaeskf2_vi_u32m4_tu (vuint32m4_t vd, vuint32m4_t vs2, size_t uimm, size_t vl); +vuint32m8_t __riscv_vaeskf2_vi_u32m8_tu (vuint32m8_t vd, vuint32m8_t vs2, size_t uimm, size_t vl); +---- + +[[policy-variant-]] +==== Vector AES round zero + +[,c] +---- +vuint32mf2_t __riscv_vaesz_vs_u32mf2_u32mf2_tu (vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vaesz_vs_u32mf2_u32m1_tu (vuint32m1_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m2_t 
__riscv_vaesz_vs_u32mf2_u32m2_tu (vuint32m2_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m4_t __riscv_vaesz_vs_u32mf2_u32m4_tu (vuint32m4_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m8_t __riscv_vaesz_vs_u32mf2_u32m8_tu (vuint32m8_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vaesz_vs_u32m1_u32m1_tu (vuint32m1_t vd, vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vaesz_vs_u32m1_u32m2_tu (vuint32m2_t vd, vuint32m1_t vs2, size_t vl); +vuint32m4_t __riscv_vaesz_vs_u32m1_u32m4_tu (vuint32m4_t vd, vuint32m1_t vs2, size_t vl); +vuint32m8_t __riscv_vaesz_vs_u32m1_u32m8_tu (vuint32m8_t vd, vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vaesz_vs_u32m2_u32m2_tu (vuint32m2_t vd, vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vaesz_vs_u32m2_u32m4_tu (vuint32m4_t vd, vuint32m2_t vs2, size_t vl); +vuint32m8_t __riscv_vaesz_vs_u32m2_u32m8_tu (vuint32m8_t vd, vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vaesz_vs_u32m4_u32m4_tu (vuint32m4_t vd, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vaesz_vs_u32m4_u32m8_tu (vuint32m8_t vd, vuint32m4_t vs2, size_t vl); +---- + +=== Zvknh - NIST Suite: Vector SHA-2 Secure Hash + +[[policy-variant-]] +==== Vector SHA-2 message schedule + +[,c] +---- +vuint32mf2_t __riscv_vsha2ms_vv_u32mf2_tu (vuint32mf2_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl); +vuint32m1_t __riscv_vsha2ms_vv_u32m1_tu (vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl); +vuint32m2_t __riscv_vsha2ms_vv_u32m2_tu (vuint32m2_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl); +vuint32m4_t __riscv_vsha2ms_vv_u32m4_tu (vuint32m4_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl); +vuint32m8_t __riscv_vsha2ms_vv_u32m8_tu (vuint32m8_t vd, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl); +vuint64m1_t __riscv_vsha2ms_vv_u64m1_tu (vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl); +vuint64m2_t __riscv_vsha2ms_vv_u64m2_tu (vuint64m2_t vd, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl); +vuint64m4_t __riscv_vsha2ms_vv_u64m4_tu (vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl); +vuint64m8_t __riscv_vsha2ms_vv_u64m8_tu (vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl); +---- + +[[policy-variant-]] +==== Vector SHA-2 two rounds of compression + +[,c] +---- +vuint32mf2_t __riscv_vsha2ch_vv_u32mf2_tu (vuint32mf2_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl); +vuint32m1_t __riscv_vsha2ch_vv_u32m1_tu (vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl); +vuint32m2_t __riscv_vsha2ch_vv_u32m2_tu (vuint32m2_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl); +vuint32m4_t __riscv_vsha2ch_vv_u32m4_tu (vuint32m4_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl); +vuint32m8_t __riscv_vsha2ch_vv_u32m8_tu (vuint32m8_t vd, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl); +vuint64m1_t __riscv_vsha2ch_vv_u64m1_tu (vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl); +vuint64m2_t __riscv_vsha2ch_vv_u64m2_tu (vuint64m2_t vd, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl); +vuint64m4_t __riscv_vsha2ch_vv_u64m4_tu (vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl); +vuint64m8_t __riscv_vsha2ch_vv_u64m8_tu (vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl); +vuint32mf2_t __riscv_vsha2cl_vv_u32mf2_tu (vuint32mf2_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl); +vuint32m1_t __riscv_vsha2cl_vv_u32m1_tu (vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl); +vuint32m2_t __riscv_vsha2cl_vv_u32m2_tu (vuint32m2_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl); +vuint32m4_t __riscv_vsha2cl_vv_u32m4_tu 
(vuint32m4_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl); +vuint32m8_t __riscv_vsha2cl_vv_u32m8_tu (vuint32m8_t vd, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl); +vuint64m1_t __riscv_vsha2cl_vv_u64m1_tu (vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl); +vuint64m2_t __riscv_vsha2cl_vv_u64m2_tu (vuint64m2_t vd, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl); +vuint64m4_t __riscv_vsha2cl_vv_u64m4_tu (vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl); +vuint64m8_t __riscv_vsha2cl_vv_u64m8_tu (vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl); +---- + +=== Zvksed - ShangMi Suite: SM4 Block Cipher + +[[policy-variant-]] +==== Vector SM4 KeyExpansion + +[,c] +---- +vuint32mf2_t __riscv_vsm4k_vi_u32mf2_tu (vuint32mf2_t vd, vuint32mf2_t vs2, size_t uimm, size_t vl); +vuint32m1_t __riscv_vsm4k_vi_u32m1_tu (vuint32m1_t vd, vuint32m1_t vs2, size_t uimm, size_t vl); +vuint32m2_t __riscv_vsm4k_vi_u32m2_tu (vuint32m2_t vd, vuint32m2_t vs2, size_t uimm, size_t vl); +vuint32m4_t __riscv_vsm4k_vi_u32m4_tu (vuint32m4_t vd, vuint32m4_t vs2, size_t uimm, size_t vl); +vuint32m8_t __riscv_vsm4k_vi_u32m8_tu (vuint32m8_t vd, vuint32m8_t vs2, size_t uimm, size_t vl); +---- + +[[policy-variant-]] +==== Vector SM4 Rounds + +[,c] +---- +vuint32mf2_t __riscv_vsm4r_vv_u32mf2_tu (vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); +vuint32mf2_t __riscv_vsm4r_vs_u32mf2_u32mf2_tu (vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vsm4r_vs_u32mf2_u32m1_tu (vuint32m1_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m2_t __riscv_vsm4r_vs_u32mf2_u32m2_tu (vuint32m2_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m4_t __riscv_vsm4r_vs_u32mf2_u32m4_tu (vuint32m4_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m8_t __riscv_vsm4r_vs_u32mf2_u32m8_tu (vuint32m8_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vsm4r_vv_u32m1_tu (vuint32m1_t vd, vuint32m1_t vs2, size_t vl); +vuint32m1_t __riscv_vsm4r_vs_u32m1_u32m1_tu (vuint32m1_t vd, vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vsm4r_vs_u32m1_u32m2_tu (vuint32m2_t vd, vuint32m1_t vs2, size_t vl); +vuint32m4_t __riscv_vsm4r_vs_u32m1_u32m4_tu (vuint32m4_t vd, vuint32m1_t vs2, size_t vl); +vuint32m8_t __riscv_vsm4r_vs_u32m1_u32m8_tu (vuint32m8_t vd, vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vsm4r_vv_u32m2_tu (vuint32m2_t vd, vuint32m2_t vs2, size_t vl); +vuint32m2_t __riscv_vsm4r_vs_u32m2_u32m2_tu (vuint32m2_t vd, vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vsm4r_vs_u32m2_u32m4_tu (vuint32m4_t vd, vuint32m2_t vs2, size_t vl); +vuint32m8_t __riscv_vsm4r_vs_u32m2_u32m8_tu (vuint32m8_t vd, vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vsm4r_vv_u32m4_tu (vuint32m4_t vd, vuint32m4_t vs2, size_t vl); +vuint32m4_t __riscv_vsm4r_vs_u32m4_u32m4_tu (vuint32m4_t vd, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vsm4r_vs_u32m4_u32m8_tu (vuint32m8_t vd, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vsm4r_vv_u32m8_tu (vuint32m8_t vd, vuint32m8_t vs2, size_t vl); +---- + +=== Zvksh - ShangMi Suite: SM3 Secure Hash + +[[policy-variant-]] +==== Vector SM3 Message Expansion + +[,c] +---- +vuint32mf2_t __riscv_vsm3me_vv_u32mf2_tu (vuint32mf2_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl); +vuint32m1_t __riscv_vsm3me_vv_u32m1_tu (vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl); +vuint32m2_t __riscv_vsm3me_vv_u32m2_tu (vuint32m2_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl); +vuint32m4_t __riscv_vsm3me_vv_u32m4_tu (vuint32m4_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl); +vuint32m8_t 
__riscv_vsm3me_vv_u32m8_tu (vuint32m8_t vd, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl); +---- + +[[policy-variant-]] +==== Vector SM3 Compression + +[,c] +---- +vuint32mf2_t __riscv_vsm3c_vi_u32mf2_tu (vuint32mf2_t vd, vuint32mf2_t vs2, size_t uimm, size_t vl); +vuint32m1_t __riscv_vsm3c_vi_u32m1_tu (vuint32m1_t vd, vuint32m1_t vs2, size_t uimm, size_t vl); +vuint32m2_t __riscv_vsm3c_vi_u32m2_tu (vuint32m2_t vd, vuint32m2_t vs2, size_t uimm, size_t vl); +vuint32m4_t __riscv_vsm3c_vi_u32m4_tu (vuint32m4_t vd, vuint32m4_t vs2, size_t uimm, size_t vl); +vuint32m8_t __riscv_vsm3c_vi_u32m8_tu (vuint32m8_t vd, vuint32m8_t vs2, size_t uimm, size_t vl); +---- diff --git a/auto-generated/vector-crypto/policy_funcs/intrinsic_funcs/00_zvbb_-_vector_bit-manipulation_used_in_cryptography.adoc b/auto-generated/vector-crypto/policy_funcs/intrinsic_funcs/00_zvbb_-_vector_bit-manipulation_used_in_cryptography.adoc new file mode 100644 index 000000000..4433b14fb --- /dev/null +++ b/auto-generated/vector-crypto/policy_funcs/intrinsic_funcs/00_zvbb_-_vector_bit-manipulation_used_in_cryptography.adoc @@ -0,0 +1,1238 @@ + +=== Zvbb - Vector Bit-manipulation used in Cryptography + +[[policy-variant-]] +==== Vector Bit-manipulation used in Cryptography - Bitwise And-Not + +[,c] +---- +vuint8mf8_t __riscv_vandn_vv_u8mf8_tu (vuint8mf8_t vd, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl); +vuint8mf8_t __riscv_vandn_vx_u8mf8_tu (vuint8mf8_t vd, vuint8mf8_t vs2, uint8_t rs1, size_t vl); +vuint8mf4_t __riscv_vandn_vv_u8mf4_tu (vuint8mf4_t vd, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl); +vuint8mf4_t __riscv_vandn_vx_u8mf4_tu (vuint8mf4_t vd, vuint8mf4_t vs2, uint8_t rs1, size_t vl); +vuint8mf2_t __riscv_vandn_vv_u8mf2_tu (vuint8mf2_t vd, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl); +vuint8mf2_t __riscv_vandn_vx_u8mf2_tu (vuint8mf2_t vd, vuint8mf2_t vs2, uint8_t rs1, size_t vl); +vuint8m1_t __riscv_vandn_vv_u8m1_tu (vuint8m1_t vd, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl); +vuint8m1_t __riscv_vandn_vx_u8m1_tu (vuint8m1_t vd, vuint8m1_t vs2, uint8_t rs1, size_t vl); +vuint8m2_t __riscv_vandn_vv_u8m2_tu (vuint8m2_t vd, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl); +vuint8m2_t __riscv_vandn_vx_u8m2_tu (vuint8m2_t vd, vuint8m2_t vs2, uint8_t rs1, size_t vl); +vuint8m4_t __riscv_vandn_vv_u8m4_tu (vuint8m4_t vd, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl); +vuint8m4_t __riscv_vandn_vx_u8m4_tu (vuint8m4_t vd, vuint8m4_t vs2, uint8_t rs1, size_t vl); +vuint8m8_t __riscv_vandn_vv_u8m8_tu (vuint8m8_t vd, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl); +vuint8m8_t __riscv_vandn_vx_u8m8_tu (vuint8m8_t vd, vuint8m8_t vs2, uint8_t rs1, size_t vl); +vuint16mf4_t __riscv_vandn_vv_u16mf4_tu (vuint16mf4_t vd, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl); +vuint16mf4_t __riscv_vandn_vx_u16mf4_tu (vuint16mf4_t vd, vuint16mf4_t vs2, uint16_t rs1, size_t vl); +vuint16mf2_t __riscv_vandn_vv_u16mf2_tu (vuint16mf2_t vd, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl); +vuint16mf2_t __riscv_vandn_vx_u16mf2_tu (vuint16mf2_t vd, vuint16mf2_t vs2, uint16_t rs1, size_t vl); +vuint16m1_t __riscv_vandn_vv_u16m1_tu (vuint16m1_t vd, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl); +vuint16m1_t __riscv_vandn_vx_u16m1_tu (vuint16m1_t vd, vuint16m1_t vs2, uint16_t rs1, size_t vl); +vuint16m2_t __riscv_vandn_vv_u16m2_tu (vuint16m2_t vd, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl); +vuint16m2_t __riscv_vandn_vx_u16m2_tu (vuint16m2_t vd, vuint16m2_t vs2, uint16_t rs1, size_t vl); +vuint16m4_t __riscv_vandn_vv_u16m4_tu (vuint16m4_t vd, vuint16m4_t vs2, vuint16m4_t 
vs1, size_t vl); +vuint16m4_t __riscv_vandn_vx_u16m4_tu (vuint16m4_t vd, vuint16m4_t vs2, uint16_t rs1, size_t vl); +vuint16m8_t __riscv_vandn_vv_u16m8_tu (vuint16m8_t vd, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl); +vuint16m8_t __riscv_vandn_vx_u16m8_tu (vuint16m8_t vd, vuint16m8_t vs2, uint16_t rs1, size_t vl); +vuint32mf2_t __riscv_vandn_vv_u32mf2_tu (vuint32mf2_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl); +vuint32mf2_t __riscv_vandn_vx_u32mf2_tu (vuint32mf2_t vd, vuint32mf2_t vs2, uint32_t rs1, size_t vl); +vuint32m1_t __riscv_vandn_vv_u32m1_tu (vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl); +vuint32m1_t __riscv_vandn_vx_u32m1_tu (vuint32m1_t vd, vuint32m1_t vs2, uint32_t rs1, size_t vl); +vuint32m2_t __riscv_vandn_vv_u32m2_tu (vuint32m2_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl); +vuint32m2_t __riscv_vandn_vx_u32m2_tu (vuint32m2_t vd, vuint32m2_t vs2, uint32_t rs1, size_t vl); +vuint32m4_t __riscv_vandn_vv_u32m4_tu (vuint32m4_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl); +vuint32m4_t __riscv_vandn_vx_u32m4_tu (vuint32m4_t vd, vuint32m4_t vs2, uint32_t rs1, size_t vl); +vuint32m8_t __riscv_vandn_vv_u32m8_tu (vuint32m8_t vd, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl); +vuint32m8_t __riscv_vandn_vx_u32m8_tu (vuint32m8_t vd, vuint32m8_t vs2, uint32_t rs1, size_t vl); +vuint64m1_t __riscv_vandn_vv_u64m1_tu (vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl); +vuint64m1_t __riscv_vandn_vx_u64m1_tu (vuint64m1_t vd, vuint64m1_t vs2, uint64_t rs1, size_t vl); +vuint64m2_t __riscv_vandn_vv_u64m2_tu (vuint64m2_t vd, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl); +vuint64m2_t __riscv_vandn_vx_u64m2_tu (vuint64m2_t vd, vuint64m2_t vs2, uint64_t rs1, size_t vl); +vuint64m4_t __riscv_vandn_vv_u64m4_tu (vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl); +vuint64m4_t __riscv_vandn_vx_u64m4_tu (vuint64m4_t vd, vuint64m4_t vs2, uint64_t rs1, size_t vl); +vuint64m8_t __riscv_vandn_vv_u64m8_tu (vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl); +vuint64m8_t __riscv_vandn_vx_u64m8_tu (vuint64m8_t vd, vuint64m8_t vs2, uint64_t rs1, size_t vl); +// masked functions +vuint8mf8_t __riscv_vandn_vv_u8mf8_tum (vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl); +vuint8mf8_t __riscv_vandn_vx_u8mf8_tum (vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, uint8_t rs1, size_t vl); +vuint8mf4_t __riscv_vandn_vv_u8mf4_tum (vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl); +vuint8mf4_t __riscv_vandn_vx_u8mf4_tum (vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, uint8_t rs1, size_t vl); +vuint8mf2_t __riscv_vandn_vv_u8mf2_tum (vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl); +vuint8mf2_t __riscv_vandn_vx_u8mf2_tum (vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, uint8_t rs1, size_t vl); +vuint8m1_t __riscv_vandn_vv_u8m1_tum (vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl); +vuint8m1_t __riscv_vandn_vx_u8m1_tum (vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, uint8_t rs1, size_t vl); +vuint8m2_t __riscv_vandn_vv_u8m2_tum (vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl); +vuint8m2_t __riscv_vandn_vx_u8m2_tum (vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, uint8_t rs1, size_t vl); +vuint8m4_t __riscv_vandn_vv_u8m4_tum (vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl); +vuint8m4_t __riscv_vandn_vx_u8m4_tum (vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, uint8_t rs1, size_t vl); +vuint8m8_t __riscv_vandn_vv_u8m8_tum (vbool1_t vm, 
vuint8m8_t vd, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl); +vuint8m8_t __riscv_vandn_vx_u8m8_tum (vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, uint8_t rs1, size_t vl); +vuint16mf4_t __riscv_vandn_vv_u16mf4_tum (vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl); +vuint16mf4_t __riscv_vandn_vx_u16mf4_tum (vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, uint16_t rs1, size_t vl); +vuint16mf2_t __riscv_vandn_vv_u16mf2_tum (vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl); +vuint16mf2_t __riscv_vandn_vx_u16mf2_tum (vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, uint16_t rs1, size_t vl); +vuint16m1_t __riscv_vandn_vv_u16m1_tum (vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl); +vuint16m1_t __riscv_vandn_vx_u16m1_tum (vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, uint16_t rs1, size_t vl); +vuint16m2_t __riscv_vandn_vv_u16m2_tum (vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl); +vuint16m2_t __riscv_vandn_vx_u16m2_tum (vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, uint16_t rs1, size_t vl); +vuint16m4_t __riscv_vandn_vv_u16m4_tum (vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl); +vuint16m4_t __riscv_vandn_vx_u16m4_tum (vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, uint16_t rs1, size_t vl); +vuint16m8_t __riscv_vandn_vv_u16m8_tum (vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl); +vuint16m8_t __riscv_vandn_vx_u16m8_tum (vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, uint16_t rs1, size_t vl); +vuint32mf2_t __riscv_vandn_vv_u32mf2_tum (vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl); +vuint32mf2_t __riscv_vandn_vx_u32mf2_tum (vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, uint32_t rs1, size_t vl); +vuint32m1_t __riscv_vandn_vv_u32m1_tum (vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl); +vuint32m1_t __riscv_vandn_vx_u32m1_tum (vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, uint32_t rs1, size_t vl); +vuint32m2_t __riscv_vandn_vv_u32m2_tum (vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl); +vuint32m2_t __riscv_vandn_vx_u32m2_tum (vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, uint32_t rs1, size_t vl); +vuint32m4_t __riscv_vandn_vv_u32m4_tum (vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl); +vuint32m4_t __riscv_vandn_vx_u32m4_tum (vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, uint32_t rs1, size_t vl); +vuint32m8_t __riscv_vandn_vv_u32m8_tum (vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl); +vuint32m8_t __riscv_vandn_vx_u32m8_tum (vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, uint32_t rs1, size_t vl); +vuint64m1_t __riscv_vandn_vv_u64m1_tum (vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl); +vuint64m1_t __riscv_vandn_vx_u64m1_tum (vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, uint64_t rs1, size_t vl); +vuint64m2_t __riscv_vandn_vv_u64m2_tum (vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl); +vuint64m2_t __riscv_vandn_vx_u64m2_tum (vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, uint64_t rs1, size_t vl); +vuint64m4_t __riscv_vandn_vv_u64m4_tum (vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl); +vuint64m4_t __riscv_vandn_vx_u64m4_tum (vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, uint64_t rs1, size_t vl); +vuint64m8_t __riscv_vandn_vv_u64m8_tum (vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl); +vuint64m8_t 
__riscv_vandn_vx_u64m8_tum (vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, uint64_t rs1, size_t vl); +// masked functions +vuint8mf8_t __riscv_vandn_vv_u8mf8_tumu (vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl); +vuint8mf8_t __riscv_vandn_vx_u8mf8_tumu (vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, uint8_t rs1, size_t vl); +vuint8mf4_t __riscv_vandn_vv_u8mf4_tumu (vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl); +vuint8mf4_t __riscv_vandn_vx_u8mf4_tumu (vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, uint8_t rs1, size_t vl); +vuint8mf2_t __riscv_vandn_vv_u8mf2_tumu (vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl); +vuint8mf2_t __riscv_vandn_vx_u8mf2_tumu (vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, uint8_t rs1, size_t vl); +vuint8m1_t __riscv_vandn_vv_u8m1_tumu (vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl); +vuint8m1_t __riscv_vandn_vx_u8m1_tumu (vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, uint8_t rs1, size_t vl); +vuint8m2_t __riscv_vandn_vv_u8m2_tumu (vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl); +vuint8m2_t __riscv_vandn_vx_u8m2_tumu (vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, uint8_t rs1, size_t vl); +vuint8m4_t __riscv_vandn_vv_u8m4_tumu (vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl); +vuint8m4_t __riscv_vandn_vx_u8m4_tumu (vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, uint8_t rs1, size_t vl); +vuint8m8_t __riscv_vandn_vv_u8m8_tumu (vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl); +vuint8m8_t __riscv_vandn_vx_u8m8_tumu (vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, uint8_t rs1, size_t vl); +vuint16mf4_t __riscv_vandn_vv_u16mf4_tumu (vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl); +vuint16mf4_t __riscv_vandn_vx_u16mf4_tumu (vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, uint16_t rs1, size_t vl); +vuint16mf2_t __riscv_vandn_vv_u16mf2_tumu (vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl); +vuint16mf2_t __riscv_vandn_vx_u16mf2_tumu (vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, uint16_t rs1, size_t vl); +vuint16m1_t __riscv_vandn_vv_u16m1_tumu (vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl); +vuint16m1_t __riscv_vandn_vx_u16m1_tumu (vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, uint16_t rs1, size_t vl); +vuint16m2_t __riscv_vandn_vv_u16m2_tumu (vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl); +vuint16m2_t __riscv_vandn_vx_u16m2_tumu (vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, uint16_t rs1, size_t vl); +vuint16m4_t __riscv_vandn_vv_u16m4_tumu (vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl); +vuint16m4_t __riscv_vandn_vx_u16m4_tumu (vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, uint16_t rs1, size_t vl); +vuint16m8_t __riscv_vandn_vv_u16m8_tumu (vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl); +vuint16m8_t __riscv_vandn_vx_u16m8_tumu (vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, uint16_t rs1, size_t vl); +vuint32mf2_t __riscv_vandn_vv_u32mf2_tumu (vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl); +vuint32mf2_t __riscv_vandn_vx_u32mf2_tumu (vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, uint32_t rs1, size_t vl); +vuint32m1_t __riscv_vandn_vv_u32m1_tumu (vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl); +vuint32m1_t __riscv_vandn_vx_u32m1_tumu (vbool32_t vm, vuint32m1_t vd, vuint32m1_t 
vs2, uint32_t rs1, size_t vl); +vuint32m2_t __riscv_vandn_vv_u32m2_tumu (vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl); +vuint32m2_t __riscv_vandn_vx_u32m2_tumu (vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, uint32_t rs1, size_t vl); +vuint32m4_t __riscv_vandn_vv_u32m4_tumu (vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl); +vuint32m4_t __riscv_vandn_vx_u32m4_tumu (vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, uint32_t rs1, size_t vl); +vuint32m8_t __riscv_vandn_vv_u32m8_tumu (vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl); +vuint32m8_t __riscv_vandn_vx_u32m8_tumu (vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, uint32_t rs1, size_t vl); +vuint64m1_t __riscv_vandn_vv_u64m1_tumu (vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl); +vuint64m1_t __riscv_vandn_vx_u64m1_tumu (vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, uint64_t rs1, size_t vl); +vuint64m2_t __riscv_vandn_vv_u64m2_tumu (vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl); +vuint64m2_t __riscv_vandn_vx_u64m2_tumu (vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, uint64_t rs1, size_t vl); +vuint64m4_t __riscv_vandn_vv_u64m4_tumu (vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl); +vuint64m4_t __riscv_vandn_vx_u64m4_tumu (vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, uint64_t rs1, size_t vl); +vuint64m8_t __riscv_vandn_vv_u64m8_tumu (vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl); +vuint64m8_t __riscv_vandn_vx_u64m8_tumu (vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, uint64_t rs1, size_t vl); +// masked functions +vuint8mf8_t __riscv_vandn_vv_u8mf8_mu (vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl); +vuint8mf8_t __riscv_vandn_vx_u8mf8_mu (vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, uint8_t rs1, size_t vl); +vuint8mf4_t __riscv_vandn_vv_u8mf4_mu (vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl); +vuint8mf4_t __riscv_vandn_vx_u8mf4_mu (vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, uint8_t rs1, size_t vl); +vuint8mf2_t __riscv_vandn_vv_u8mf2_mu (vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl); +vuint8mf2_t __riscv_vandn_vx_u8mf2_mu (vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, uint8_t rs1, size_t vl); +vuint8m1_t __riscv_vandn_vv_u8m1_mu (vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl); +vuint8m1_t __riscv_vandn_vx_u8m1_mu (vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, uint8_t rs1, size_t vl); +vuint8m2_t __riscv_vandn_vv_u8m2_mu (vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl); +vuint8m2_t __riscv_vandn_vx_u8m2_mu (vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, uint8_t rs1, size_t vl); +vuint8m4_t __riscv_vandn_vv_u8m4_mu (vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl); +vuint8m4_t __riscv_vandn_vx_u8m4_mu (vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, uint8_t rs1, size_t vl); +vuint8m8_t __riscv_vandn_vv_u8m8_mu (vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl); +vuint8m8_t __riscv_vandn_vx_u8m8_mu (vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, uint8_t rs1, size_t vl); +vuint16mf4_t __riscv_vandn_vv_u16mf4_mu (vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl); +vuint16mf4_t __riscv_vandn_vx_u16mf4_mu (vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, uint16_t rs1, size_t vl); +vuint16mf2_t __riscv_vandn_vv_u16mf2_mu (vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t 
vs2, vuint16mf2_t vs1, size_t vl); +vuint16mf2_t __riscv_vandn_vx_u16mf2_mu (vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, uint16_t rs1, size_t vl); +vuint16m1_t __riscv_vandn_vv_u16m1_mu (vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl); +vuint16m1_t __riscv_vandn_vx_u16m1_mu (vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, uint16_t rs1, size_t vl); +vuint16m2_t __riscv_vandn_vv_u16m2_mu (vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl); +vuint16m2_t __riscv_vandn_vx_u16m2_mu (vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, uint16_t rs1, size_t vl); +vuint16m4_t __riscv_vandn_vv_u16m4_mu (vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl); +vuint16m4_t __riscv_vandn_vx_u16m4_mu (vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, uint16_t rs1, size_t vl); +vuint16m8_t __riscv_vandn_vv_u16m8_mu (vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl); +vuint16m8_t __riscv_vandn_vx_u16m8_mu (vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, uint16_t rs1, size_t vl); +vuint32mf2_t __riscv_vandn_vv_u32mf2_mu (vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl); +vuint32mf2_t __riscv_vandn_vx_u32mf2_mu (vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, uint32_t rs1, size_t vl); +vuint32m1_t __riscv_vandn_vv_u32m1_mu (vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl); +vuint32m1_t __riscv_vandn_vx_u32m1_mu (vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, uint32_t rs1, size_t vl); +vuint32m2_t __riscv_vandn_vv_u32m2_mu (vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl); +vuint32m2_t __riscv_vandn_vx_u32m2_mu (vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, uint32_t rs1, size_t vl); +vuint32m4_t __riscv_vandn_vv_u32m4_mu (vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl); +vuint32m4_t __riscv_vandn_vx_u32m4_mu (vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, uint32_t rs1, size_t vl); +vuint32m8_t __riscv_vandn_vv_u32m8_mu (vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl); +vuint32m8_t __riscv_vandn_vx_u32m8_mu (vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, uint32_t rs1, size_t vl); +vuint64m1_t __riscv_vandn_vv_u64m1_mu (vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl); +vuint64m1_t __riscv_vandn_vx_u64m1_mu (vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, uint64_t rs1, size_t vl); +vuint64m2_t __riscv_vandn_vv_u64m2_mu (vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl); +vuint64m2_t __riscv_vandn_vx_u64m2_mu (vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, uint64_t rs1, size_t vl); +vuint64m4_t __riscv_vandn_vv_u64m4_mu (vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl); +vuint64m4_t __riscv_vandn_vx_u64m4_mu (vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, uint64_t rs1, size_t vl); +vuint64m8_t __riscv_vandn_vv_u64m8_mu (vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl); +vuint64m8_t __riscv_vandn_vx_u64m8_mu (vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, uint64_t rs1, size_t vl); +---- + +[[policy-variant-]] +==== Vector Basic Bit-manipulation - Reverse + +[,c] +---- +vuint8mf8_t __riscv_vbrev_v_u8mf8_tu (vuint8mf8_t vd, vuint8mf8_t vs2, size_t vl); +vuint8mf4_t __riscv_vbrev_v_u8mf4_tu (vuint8mf4_t vd, vuint8mf4_t vs2, size_t vl); +vuint8mf2_t __riscv_vbrev_v_u8mf2_tu (vuint8mf2_t vd, vuint8mf2_t vs2, size_t vl); +vuint8m1_t __riscv_vbrev_v_u8m1_tu (vuint8m1_t vd, vuint8m1_t vs2, size_t vl); 
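+// Illustrative usage sketch (assumption, not part of the generated listing):
+// vbrev reverses the bit order within each element, and the _tu policy keeps
+// tail elements of vd undisturbed. Uses the u8m1 form declared just above;
+// the wrapper name is hypothetical.
+static inline vuint8m1_t example_bit_reverse_tu(vuint8m1_t vd, vuint8m1_t vs2,
+                                                size_t vl) {
+  return __riscv_vbrev_v_u8m1_tu(vd, vs2, vl);
+}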
+vuint8m2_t __riscv_vbrev_v_u8m2_tu (vuint8m2_t vd, vuint8m2_t vs2, size_t vl); +vuint8m4_t __riscv_vbrev_v_u8m4_tu (vuint8m4_t vd, vuint8m4_t vs2, size_t vl); +vuint8m8_t __riscv_vbrev_v_u8m8_tu (vuint8m8_t vd, vuint8m8_t vs2, size_t vl); +vuint16mf4_t __riscv_vbrev_v_u16mf4_tu (vuint16mf4_t vd, vuint16mf4_t vs2, size_t vl); +vuint16mf2_t __riscv_vbrev_v_u16mf2_tu (vuint16mf2_t vd, vuint16mf2_t vs2, size_t vl); +vuint16m1_t __riscv_vbrev_v_u16m1_tu (vuint16m1_t vd, vuint16m1_t vs2, size_t vl); +vuint16m2_t __riscv_vbrev_v_u16m2_tu (vuint16m2_t vd, vuint16m2_t vs2, size_t vl); +vuint16m4_t __riscv_vbrev_v_u16m4_tu (vuint16m4_t vd, vuint16m4_t vs2, size_t vl); +vuint16m8_t __riscv_vbrev_v_u16m8_tu (vuint16m8_t vd, vuint16m8_t vs2, size_t vl); +vuint32mf2_t __riscv_vbrev_v_u32mf2_tu (vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vbrev_v_u32m1_tu (vuint32m1_t vd, vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vbrev_v_u32m2_tu (vuint32m2_t vd, vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vbrev_v_u32m4_tu (vuint32m4_t vd, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vbrev_v_u32m8_tu (vuint32m8_t vd, vuint32m8_t vs2, size_t vl); +vuint64m1_t __riscv_vbrev_v_u64m1_tu (vuint64m1_t vd, vuint64m1_t vs2, size_t vl); +vuint64m2_t __riscv_vbrev_v_u64m2_tu (vuint64m2_t vd, vuint64m2_t vs2, size_t vl); +vuint64m4_t __riscv_vbrev_v_u64m4_tu (vuint64m4_t vd, vuint64m4_t vs2, size_t vl); +vuint64m8_t __riscv_vbrev_v_u64m8_tu (vuint64m8_t vd, vuint64m8_t vs2, size_t vl); +vuint8mf8_t __riscv_vbrev8_v_u8mf8_tu (vuint8mf8_t vd, vuint8mf8_t vs2, size_t vl); +vuint8mf4_t __riscv_vbrev8_v_u8mf4_tu (vuint8mf4_t vd, vuint8mf4_t vs2, size_t vl); +vuint8mf2_t __riscv_vbrev8_v_u8mf2_tu (vuint8mf2_t vd, vuint8mf2_t vs2, size_t vl); +vuint8m1_t __riscv_vbrev8_v_u8m1_tu (vuint8m1_t vd, vuint8m1_t vs2, size_t vl); +vuint8m2_t __riscv_vbrev8_v_u8m2_tu (vuint8m2_t vd, vuint8m2_t vs2, size_t vl); +vuint8m4_t __riscv_vbrev8_v_u8m4_tu (vuint8m4_t vd, vuint8m4_t vs2, size_t vl); +vuint8m8_t __riscv_vbrev8_v_u8m8_tu (vuint8m8_t vd, vuint8m8_t vs2, size_t vl); +vuint16mf4_t __riscv_vbrev8_v_u16mf4_tu (vuint16mf4_t vd, vuint16mf4_t vs2, size_t vl); +vuint16mf2_t __riscv_vbrev8_v_u16mf2_tu (vuint16mf2_t vd, vuint16mf2_t vs2, size_t vl); +vuint16m1_t __riscv_vbrev8_v_u16m1_tu (vuint16m1_t vd, vuint16m1_t vs2, size_t vl); +vuint16m2_t __riscv_vbrev8_v_u16m2_tu (vuint16m2_t vd, vuint16m2_t vs2, size_t vl); +vuint16m4_t __riscv_vbrev8_v_u16m4_tu (vuint16m4_t vd, vuint16m4_t vs2, size_t vl); +vuint16m8_t __riscv_vbrev8_v_u16m8_tu (vuint16m8_t vd, vuint16m8_t vs2, size_t vl); +vuint32mf2_t __riscv_vbrev8_v_u32mf2_tu (vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vbrev8_v_u32m1_tu (vuint32m1_t vd, vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vbrev8_v_u32m2_tu (vuint32m2_t vd, vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vbrev8_v_u32m4_tu (vuint32m4_t vd, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vbrev8_v_u32m8_tu (vuint32m8_t vd, vuint32m8_t vs2, size_t vl); +vuint64m1_t __riscv_vbrev8_v_u64m1_tu (vuint64m1_t vd, vuint64m1_t vs2, size_t vl); +vuint64m2_t __riscv_vbrev8_v_u64m2_tu (vuint64m2_t vd, vuint64m2_t vs2, size_t vl); +vuint64m4_t __riscv_vbrev8_v_u64m4_tu (vuint64m4_t vd, vuint64m4_t vs2, size_t vl); +vuint64m8_t __riscv_vbrev8_v_u64m8_tu (vuint64m8_t vd, vuint64m8_t vs2, size_t vl); +vuint8mf8_t __riscv_vrev8_v_u8mf8_tu (vuint8mf8_t vd, vuint8mf8_t vs2, size_t vl); +vuint8mf4_t __riscv_vrev8_v_u8mf4_tu (vuint8mf4_t vd, vuint8mf4_t vs2, size_t vl); 
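+// Illustrative usage sketch (assumption, not part of the generated listing):
+// vbrev8 reverses the bit order within every byte independently, e.g. to
+// reflect input bytes for a reflected CRC. Uses the u8m1 form declared
+// earlier in this listing; the wrapper name is hypothetical.
+static inline vuint8m1_t example_reflect_bytes_tu(vuint8m1_t vd,
+                                                  vuint8m1_t vs2, size_t vl) {
+  return __riscv_vbrev8_v_u8m1_tu(vd, vs2, vl);
+}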
+vuint8mf2_t __riscv_vrev8_v_u8mf2_tu (vuint8mf2_t vd, vuint8mf2_t vs2, size_t vl); +vuint8m1_t __riscv_vrev8_v_u8m1_tu (vuint8m1_t vd, vuint8m1_t vs2, size_t vl); +vuint8m2_t __riscv_vrev8_v_u8m2_tu (vuint8m2_t vd, vuint8m2_t vs2, size_t vl); +vuint8m4_t __riscv_vrev8_v_u8m4_tu (vuint8m4_t vd, vuint8m4_t vs2, size_t vl); +vuint8m8_t __riscv_vrev8_v_u8m8_tu (vuint8m8_t vd, vuint8m8_t vs2, size_t vl); +vuint16mf4_t __riscv_vrev8_v_u16mf4_tu (vuint16mf4_t vd, vuint16mf4_t vs2, size_t vl); +vuint16mf2_t __riscv_vrev8_v_u16mf2_tu (vuint16mf2_t vd, vuint16mf2_t vs2, size_t vl); +vuint16m1_t __riscv_vrev8_v_u16m1_tu (vuint16m1_t vd, vuint16m1_t vs2, size_t vl); +vuint16m2_t __riscv_vrev8_v_u16m2_tu (vuint16m2_t vd, vuint16m2_t vs2, size_t vl); +vuint16m4_t __riscv_vrev8_v_u16m4_tu (vuint16m4_t vd, vuint16m4_t vs2, size_t vl); +vuint16m8_t __riscv_vrev8_v_u16m8_tu (vuint16m8_t vd, vuint16m8_t vs2, size_t vl); +vuint32mf2_t __riscv_vrev8_v_u32mf2_tu (vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vrev8_v_u32m1_tu (vuint32m1_t vd, vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vrev8_v_u32m2_tu (vuint32m2_t vd, vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vrev8_v_u32m4_tu (vuint32m4_t vd, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vrev8_v_u32m8_tu (vuint32m8_t vd, vuint32m8_t vs2, size_t vl); +vuint64m1_t __riscv_vrev8_v_u64m1_tu (vuint64m1_t vd, vuint64m1_t vs2, size_t vl); +vuint64m2_t __riscv_vrev8_v_u64m2_tu (vuint64m2_t vd, vuint64m2_t vs2, size_t vl); +vuint64m4_t __riscv_vrev8_v_u64m4_tu (vuint64m4_t vd, vuint64m4_t vs2, size_t vl); +vuint64m8_t __riscv_vrev8_v_u64m8_tu (vuint64m8_t vd, vuint64m8_t vs2, size_t vl); +// masked functions +vuint8mf8_t __riscv_vbrev_v_u8mf8_tum (vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, size_t vl); +vuint8mf4_t __riscv_vbrev_v_u8mf4_tum (vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, size_t vl); +vuint8mf2_t __riscv_vbrev_v_u8mf2_tum (vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, size_t vl); +vuint8m1_t __riscv_vbrev_v_u8m1_tum (vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, size_t vl); +vuint8m2_t __riscv_vbrev_v_u8m2_tum (vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, size_t vl); +vuint8m4_t __riscv_vbrev_v_u8m4_tum (vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, size_t vl); +vuint8m8_t __riscv_vbrev_v_u8m8_tum (vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, size_t vl); +vuint16mf4_t __riscv_vbrev_v_u16mf4_tum (vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, size_t vl); +vuint16mf2_t __riscv_vbrev_v_u16mf2_tum (vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, size_t vl); +vuint16m1_t __riscv_vbrev_v_u16m1_tum (vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, size_t vl); +vuint16m2_t __riscv_vbrev_v_u16m2_tum (vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, size_t vl); +vuint16m4_t __riscv_vbrev_v_u16m4_tum (vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, size_t vl); +vuint16m8_t __riscv_vbrev_v_u16m8_tum (vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, size_t vl); +vuint32mf2_t __riscv_vbrev_v_u32mf2_tum (vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vbrev_v_u32m1_tum (vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vbrev_v_u32m2_tum (vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vbrev_v_u32m4_tum (vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vbrev_v_u32m8_tum (vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, size_t vl); +vuint64m1_t __riscv_vbrev_v_u64m1_tum (vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, 
size_t vl); +vuint64m2_t __riscv_vbrev_v_u64m2_tum (vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, size_t vl); +vuint64m4_t __riscv_vbrev_v_u64m4_tum (vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, size_t vl); +vuint64m8_t __riscv_vbrev_v_u64m8_tum (vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, size_t vl); +vuint8mf8_t __riscv_vbrev8_v_u8mf8_tum (vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, size_t vl); +vuint8mf4_t __riscv_vbrev8_v_u8mf4_tum (vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, size_t vl); +vuint8mf2_t __riscv_vbrev8_v_u8mf2_tum (vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, size_t vl); +vuint8m1_t __riscv_vbrev8_v_u8m1_tum (vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, size_t vl); +vuint8m2_t __riscv_vbrev8_v_u8m2_tum (vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, size_t vl); +vuint8m4_t __riscv_vbrev8_v_u8m4_tum (vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, size_t vl); +vuint8m8_t __riscv_vbrev8_v_u8m8_tum (vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, size_t vl); +vuint16mf4_t __riscv_vbrev8_v_u16mf4_tum (vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, size_t vl); +vuint16mf2_t __riscv_vbrev8_v_u16mf2_tum (vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, size_t vl); +vuint16m1_t __riscv_vbrev8_v_u16m1_tum (vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, size_t vl); +vuint16m2_t __riscv_vbrev8_v_u16m2_tum (vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, size_t vl); +vuint16m4_t __riscv_vbrev8_v_u16m4_tum (vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, size_t vl); +vuint16m8_t __riscv_vbrev8_v_u16m8_tum (vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, size_t vl); +vuint32mf2_t __riscv_vbrev8_v_u32mf2_tum (vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vbrev8_v_u32m1_tum (vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vbrev8_v_u32m2_tum (vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vbrev8_v_u32m4_tum (vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vbrev8_v_u32m8_tum (vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, size_t vl); +vuint64m1_t __riscv_vbrev8_v_u64m1_tum (vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, size_t vl); +vuint64m2_t __riscv_vbrev8_v_u64m2_tum (vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, size_t vl); +vuint64m4_t __riscv_vbrev8_v_u64m4_tum (vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, size_t vl); +vuint64m8_t __riscv_vbrev8_v_u64m8_tum (vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, size_t vl); +vuint8mf8_t __riscv_vrev8_v_u8mf8_tum (vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, size_t vl); +vuint8mf4_t __riscv_vrev8_v_u8mf4_tum (vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, size_t vl); +vuint8mf2_t __riscv_vrev8_v_u8mf2_tum (vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, size_t vl); +vuint8m1_t __riscv_vrev8_v_u8m1_tum (vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, size_t vl); +vuint8m2_t __riscv_vrev8_v_u8m2_tum (vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, size_t vl); +vuint8m4_t __riscv_vrev8_v_u8m4_tum (vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, size_t vl); +vuint8m8_t __riscv_vrev8_v_u8m8_tum (vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, size_t vl); +vuint16mf4_t __riscv_vrev8_v_u16mf4_tum (vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, size_t vl); +vuint16mf2_t __riscv_vrev8_v_u16mf2_tum (vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, size_t vl); +vuint16m1_t __riscv_vrev8_v_u16m1_tum (vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, size_t vl); +vuint16m2_t __riscv_vrev8_v_u16m2_tum (vbool8_t vm, vuint16m2_t vd, vuint16m2_t 
vs2, size_t vl); +vuint16m4_t __riscv_vrev8_v_u16m4_tum (vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, size_t vl); +vuint16m8_t __riscv_vrev8_v_u16m8_tum (vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, size_t vl); +vuint32mf2_t __riscv_vrev8_v_u32mf2_tum (vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vrev8_v_u32m1_tum (vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vrev8_v_u32m2_tum (vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vrev8_v_u32m4_tum (vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vrev8_v_u32m8_tum (vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, size_t vl); +vuint64m1_t __riscv_vrev8_v_u64m1_tum (vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, size_t vl); +vuint64m2_t __riscv_vrev8_v_u64m2_tum (vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, size_t vl); +vuint64m4_t __riscv_vrev8_v_u64m4_tum (vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, size_t vl); +vuint64m8_t __riscv_vrev8_v_u64m8_tum (vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, size_t vl); +// masked functions +vuint8mf8_t __riscv_vbrev_v_u8mf8_tumu (vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, size_t vl); +vuint8mf4_t __riscv_vbrev_v_u8mf4_tumu (vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, size_t vl); +vuint8mf2_t __riscv_vbrev_v_u8mf2_tumu (vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, size_t vl); +vuint8m1_t __riscv_vbrev_v_u8m1_tumu (vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, size_t vl); +vuint8m2_t __riscv_vbrev_v_u8m2_tumu (vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, size_t vl); +vuint8m4_t __riscv_vbrev_v_u8m4_tumu (vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, size_t vl); +vuint8m8_t __riscv_vbrev_v_u8m8_tumu (vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, size_t vl); +vuint16mf4_t __riscv_vbrev_v_u16mf4_tumu (vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, size_t vl); +vuint16mf2_t __riscv_vbrev_v_u16mf2_tumu (vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, size_t vl); +vuint16m1_t __riscv_vbrev_v_u16m1_tumu (vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, size_t vl); +vuint16m2_t __riscv_vbrev_v_u16m2_tumu (vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, size_t vl); +vuint16m4_t __riscv_vbrev_v_u16m4_tumu (vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, size_t vl); +vuint16m8_t __riscv_vbrev_v_u16m8_tumu (vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, size_t vl); +vuint32mf2_t __riscv_vbrev_v_u32mf2_tumu (vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vbrev_v_u32m1_tumu (vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vbrev_v_u32m2_tumu (vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vbrev_v_u32m4_tumu (vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vbrev_v_u32m8_tumu (vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, size_t vl); +vuint64m1_t __riscv_vbrev_v_u64m1_tumu (vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, size_t vl); +vuint64m2_t __riscv_vbrev_v_u64m2_tumu (vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, size_t vl); +vuint64m4_t __riscv_vbrev_v_u64m4_tumu (vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, size_t vl); +vuint64m8_t __riscv_vbrev_v_u64m8_tumu (vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, size_t vl); +vuint8mf8_t __riscv_vbrev8_v_u8mf8_tumu (vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, size_t vl); +vuint8mf4_t __riscv_vbrev8_v_u8mf4_tumu (vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, size_t vl); +vuint8mf2_t __riscv_vbrev8_v_u8mf2_tumu 
(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, size_t vl); +vuint8m1_t __riscv_vbrev8_v_u8m1_tumu (vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, size_t vl); +vuint8m2_t __riscv_vbrev8_v_u8m2_tumu (vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, size_t vl); +vuint8m4_t __riscv_vbrev8_v_u8m4_tumu (vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, size_t vl); +vuint8m8_t __riscv_vbrev8_v_u8m8_tumu (vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, size_t vl); +vuint16mf4_t __riscv_vbrev8_v_u16mf4_tumu (vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, size_t vl); +vuint16mf2_t __riscv_vbrev8_v_u16mf2_tumu (vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, size_t vl); +vuint16m1_t __riscv_vbrev8_v_u16m1_tumu (vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, size_t vl); +vuint16m2_t __riscv_vbrev8_v_u16m2_tumu (vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, size_t vl); +vuint16m4_t __riscv_vbrev8_v_u16m4_tumu (vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, size_t vl); +vuint16m8_t __riscv_vbrev8_v_u16m8_tumu (vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, size_t vl); +vuint32mf2_t __riscv_vbrev8_v_u32mf2_tumu (vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vbrev8_v_u32m1_tumu (vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vbrev8_v_u32m2_tumu (vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vbrev8_v_u32m4_tumu (vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vbrev8_v_u32m8_tumu (vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, size_t vl); +vuint64m1_t __riscv_vbrev8_v_u64m1_tumu (vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, size_t vl); +vuint64m2_t __riscv_vbrev8_v_u64m2_tumu (vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, size_t vl); +vuint64m4_t __riscv_vbrev8_v_u64m4_tumu (vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, size_t vl); +vuint64m8_t __riscv_vbrev8_v_u64m8_tumu (vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, size_t vl); +vuint8mf8_t __riscv_vrev8_v_u8mf8_tumu (vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, size_t vl); +vuint8mf4_t __riscv_vrev8_v_u8mf4_tumu (vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, size_t vl); +vuint8mf2_t __riscv_vrev8_v_u8mf2_tumu (vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, size_t vl); +vuint8m1_t __riscv_vrev8_v_u8m1_tumu (vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, size_t vl); +vuint8m2_t __riscv_vrev8_v_u8m2_tumu (vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, size_t vl); +vuint8m4_t __riscv_vrev8_v_u8m4_tumu (vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, size_t vl); +vuint8m8_t __riscv_vrev8_v_u8m8_tumu (vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, size_t vl); +vuint16mf4_t __riscv_vrev8_v_u16mf4_tumu (vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, size_t vl); +vuint16mf2_t __riscv_vrev8_v_u16mf2_tumu (vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, size_t vl); +vuint16m1_t __riscv_vrev8_v_u16m1_tumu (vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, size_t vl); +vuint16m2_t __riscv_vrev8_v_u16m2_tumu (vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, size_t vl); +vuint16m4_t __riscv_vrev8_v_u16m4_tumu (vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, size_t vl); +vuint16m8_t __riscv_vrev8_v_u16m8_tumu (vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, size_t vl); +vuint32mf2_t __riscv_vrev8_v_u32mf2_tumu (vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vrev8_v_u32m1_tumu (vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vrev8_v_u32m2_tumu (vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, size_t vl); 
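+// Illustrative usage sketch (assumption, not part of the generated listing):
+// a masked byte-order swap under the tail-undisturbed, mask-undisturbed
+// (_tumu) policy: only active 32-bit elements are byte-reversed; inactive and
+// tail elements keep the value already in vd. Uses the u32m2 form declared
+// just above; the wrapper name is hypothetical.
+static inline vuint32m2_t example_masked_byte_swap_tumu(vbool16_t vm,
+                                                        vuint32m2_t vd,
+                                                        vuint32m2_t vs2,
+                                                        size_t vl) {
+  return __riscv_vrev8_v_u32m2_tumu(vm, vd, vs2, vl);
+}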
+vuint32m4_t __riscv_vrev8_v_u32m4_tumu (vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vrev8_v_u32m8_tumu (vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, size_t vl); +vuint64m1_t __riscv_vrev8_v_u64m1_tumu (vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, size_t vl); +vuint64m2_t __riscv_vrev8_v_u64m2_tumu (vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, size_t vl); +vuint64m4_t __riscv_vrev8_v_u64m4_tumu (vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, size_t vl); +vuint64m8_t __riscv_vrev8_v_u64m8_tumu (vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, size_t vl); +// masked functions +vuint8mf8_t __riscv_vbrev_v_u8mf8_mu (vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, size_t vl); +vuint8mf4_t __riscv_vbrev_v_u8mf4_mu (vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, size_t vl); +vuint8mf2_t __riscv_vbrev_v_u8mf2_mu (vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, size_t vl); +vuint8m1_t __riscv_vbrev_v_u8m1_mu (vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, size_t vl); +vuint8m2_t __riscv_vbrev_v_u8m2_mu (vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, size_t vl); +vuint8m4_t __riscv_vbrev_v_u8m4_mu (vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, size_t vl); +vuint8m8_t __riscv_vbrev_v_u8m8_mu (vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, size_t vl); +vuint16mf4_t __riscv_vbrev_v_u16mf4_mu (vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, size_t vl); +vuint16mf2_t __riscv_vbrev_v_u16mf2_mu (vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, size_t vl); +vuint16m1_t __riscv_vbrev_v_u16m1_mu (vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, size_t vl); +vuint16m2_t __riscv_vbrev_v_u16m2_mu (vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, size_t vl); +vuint16m4_t __riscv_vbrev_v_u16m4_mu (vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, size_t vl); +vuint16m8_t __riscv_vbrev_v_u16m8_mu (vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, size_t vl); +vuint32mf2_t __riscv_vbrev_v_u32mf2_mu (vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vbrev_v_u32m1_mu (vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vbrev_v_u32m2_mu (vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vbrev_v_u32m4_mu (vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vbrev_v_u32m8_mu (vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, size_t vl); +vuint64m1_t __riscv_vbrev_v_u64m1_mu (vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, size_t vl); +vuint64m2_t __riscv_vbrev_v_u64m2_mu (vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, size_t vl); +vuint64m4_t __riscv_vbrev_v_u64m4_mu (vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, size_t vl); +vuint64m8_t __riscv_vbrev_v_u64m8_mu (vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, size_t vl); +vuint8mf8_t __riscv_vbrev8_v_u8mf8_mu (vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, size_t vl); +vuint8mf4_t __riscv_vbrev8_v_u8mf4_mu (vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, size_t vl); +vuint8mf2_t __riscv_vbrev8_v_u8mf2_mu (vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, size_t vl); +vuint8m1_t __riscv_vbrev8_v_u8m1_mu (vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, size_t vl); +vuint8m2_t __riscv_vbrev8_v_u8m2_mu (vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, size_t vl); +vuint8m4_t __riscv_vbrev8_v_u8m4_mu (vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, size_t vl); +vuint8m8_t __riscv_vbrev8_v_u8m8_mu (vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, size_t vl); +vuint16mf4_t __riscv_vbrev8_v_u16mf4_mu (vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, size_t vl); +vuint16mf2_t 
__riscv_vbrev8_v_u16mf2_mu (vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, size_t vl); +vuint16m1_t __riscv_vbrev8_v_u16m1_mu (vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, size_t vl); +vuint16m2_t __riscv_vbrev8_v_u16m2_mu (vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, size_t vl); +vuint16m4_t __riscv_vbrev8_v_u16m4_mu (vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, size_t vl); +vuint16m8_t __riscv_vbrev8_v_u16m8_mu (vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, size_t vl); +vuint32mf2_t __riscv_vbrev8_v_u32mf2_mu (vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vbrev8_v_u32m1_mu (vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vbrev8_v_u32m2_mu (vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vbrev8_v_u32m4_mu (vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vbrev8_v_u32m8_mu (vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, size_t vl); +vuint64m1_t __riscv_vbrev8_v_u64m1_mu (vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, size_t vl); +vuint64m2_t __riscv_vbrev8_v_u64m2_mu (vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, size_t vl); +vuint64m4_t __riscv_vbrev8_v_u64m4_mu (vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, size_t vl); +vuint64m8_t __riscv_vbrev8_v_u64m8_mu (vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, size_t vl); +vuint8mf8_t __riscv_vrev8_v_u8mf8_mu (vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, size_t vl); +vuint8mf4_t __riscv_vrev8_v_u8mf4_mu (vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, size_t vl); +vuint8mf2_t __riscv_vrev8_v_u8mf2_mu (vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, size_t vl); +vuint8m1_t __riscv_vrev8_v_u8m1_mu (vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, size_t vl); +vuint8m2_t __riscv_vrev8_v_u8m2_mu (vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, size_t vl); +vuint8m4_t __riscv_vrev8_v_u8m4_mu (vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, size_t vl); +vuint8m8_t __riscv_vrev8_v_u8m8_mu (vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, size_t vl); +vuint16mf4_t __riscv_vrev8_v_u16mf4_mu (vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, size_t vl); +vuint16mf2_t __riscv_vrev8_v_u16mf2_mu (vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, size_t vl); +vuint16m1_t __riscv_vrev8_v_u16m1_mu (vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, size_t vl); +vuint16m2_t __riscv_vrev8_v_u16m2_mu (vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, size_t vl); +vuint16m4_t __riscv_vrev8_v_u16m4_mu (vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, size_t vl); +vuint16m8_t __riscv_vrev8_v_u16m8_mu (vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, size_t vl); +vuint32mf2_t __riscv_vrev8_v_u32mf2_mu (vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vrev8_v_u32m1_mu (vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vrev8_v_u32m2_mu (vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vrev8_v_u32m4_mu (vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vrev8_v_u32m8_mu (vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, size_t vl); +vuint64m1_t __riscv_vrev8_v_u64m1_mu (vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, size_t vl); +vuint64m2_t __riscv_vrev8_v_u64m2_mu (vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, size_t vl); +vuint64m4_t __riscv_vrev8_v_u64m4_mu (vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, size_t vl); +vuint64m8_t __riscv_vrev8_v_u64m8_mu (vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, size_t vl); +---- + +[[policy-variant-]] +==== 
Vector Basic Bit-manipulation - Count Bits + +[,c] +---- +vuint8mf8_t __riscv_vclz_v_u8mf8_tu (vuint8mf8_t vd, vuint8mf8_t vs2, size_t vl); +vuint8mf4_t __riscv_vclz_v_u8mf4_tu (vuint8mf4_t vd, vuint8mf4_t vs2, size_t vl); +vuint8mf2_t __riscv_vclz_v_u8mf2_tu (vuint8mf2_t vd, vuint8mf2_t vs2, size_t vl); +vuint8m1_t __riscv_vclz_v_u8m1_tu (vuint8m1_t vd, vuint8m1_t vs2, size_t vl); +vuint8m2_t __riscv_vclz_v_u8m2_tu (vuint8m2_t vd, vuint8m2_t vs2, size_t vl); +vuint8m4_t __riscv_vclz_v_u8m4_tu (vuint8m4_t vd, vuint8m4_t vs2, size_t vl); +vuint8m8_t __riscv_vclz_v_u8m8_tu (vuint8m8_t vd, vuint8m8_t vs2, size_t vl); +vuint16mf4_t __riscv_vclz_v_u16mf4_tu (vuint16mf4_t vd, vuint16mf4_t vs2, size_t vl); +vuint16mf2_t __riscv_vclz_v_u16mf2_tu (vuint16mf2_t vd, vuint16mf2_t vs2, size_t vl); +vuint16m1_t __riscv_vclz_v_u16m1_tu (vuint16m1_t vd, vuint16m1_t vs2, size_t vl); +vuint16m2_t __riscv_vclz_v_u16m2_tu (vuint16m2_t vd, vuint16m2_t vs2, size_t vl); +vuint16m4_t __riscv_vclz_v_u16m4_tu (vuint16m4_t vd, vuint16m4_t vs2, size_t vl); +vuint16m8_t __riscv_vclz_v_u16m8_tu (vuint16m8_t vd, vuint16m8_t vs2, size_t vl); +vuint32mf2_t __riscv_vclz_v_u32mf2_tu (vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vclz_v_u32m1_tu (vuint32m1_t vd, vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vclz_v_u32m2_tu (vuint32m2_t vd, vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vclz_v_u32m4_tu (vuint32m4_t vd, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vclz_v_u32m8_tu (vuint32m8_t vd, vuint32m8_t vs2, size_t vl); +vuint64m1_t __riscv_vclz_v_u64m1_tu (vuint64m1_t vd, vuint64m1_t vs2, size_t vl); +vuint64m2_t __riscv_vclz_v_u64m2_tu (vuint64m2_t vd, vuint64m2_t vs2, size_t vl); +vuint64m4_t __riscv_vclz_v_u64m4_tu (vuint64m4_t vd, vuint64m4_t vs2, size_t vl); +vuint64m8_t __riscv_vclz_v_u64m8_tu (vuint64m8_t vd, vuint64m8_t vs2, size_t vl); +vuint8mf8_t __riscv_vctz_v_u8mf8_tu (vuint8mf8_t vd, vuint8mf8_t vs2, size_t vl); +vuint8mf4_t __riscv_vctz_v_u8mf4_tu (vuint8mf4_t vd, vuint8mf4_t vs2, size_t vl); +vuint8mf2_t __riscv_vctz_v_u8mf2_tu (vuint8mf2_t vd, vuint8mf2_t vs2, size_t vl); +vuint8m1_t __riscv_vctz_v_u8m1_tu (vuint8m1_t vd, vuint8m1_t vs2, size_t vl); +vuint8m2_t __riscv_vctz_v_u8m2_tu (vuint8m2_t vd, vuint8m2_t vs2, size_t vl); +vuint8m4_t __riscv_vctz_v_u8m4_tu (vuint8m4_t vd, vuint8m4_t vs2, size_t vl); +vuint8m8_t __riscv_vctz_v_u8m8_tu (vuint8m8_t vd, vuint8m8_t vs2, size_t vl); +vuint16mf4_t __riscv_vctz_v_u16mf4_tu (vuint16mf4_t vd, vuint16mf4_t vs2, size_t vl); +vuint16mf2_t __riscv_vctz_v_u16mf2_tu (vuint16mf2_t vd, vuint16mf2_t vs2, size_t vl); +vuint16m1_t __riscv_vctz_v_u16m1_tu (vuint16m1_t vd, vuint16m1_t vs2, size_t vl); +vuint16m2_t __riscv_vctz_v_u16m2_tu (vuint16m2_t vd, vuint16m2_t vs2, size_t vl); +vuint16m4_t __riscv_vctz_v_u16m4_tu (vuint16m4_t vd, vuint16m4_t vs2, size_t vl); +vuint16m8_t __riscv_vctz_v_u16m8_tu (vuint16m8_t vd, vuint16m8_t vs2, size_t vl); +vuint32mf2_t __riscv_vctz_v_u32mf2_tu (vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vctz_v_u32m1_tu (vuint32m1_t vd, vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vctz_v_u32m2_tu (vuint32m2_t vd, vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vctz_v_u32m4_tu (vuint32m4_t vd, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vctz_v_u32m8_tu (vuint32m8_t vd, vuint32m8_t vs2, size_t vl); +vuint64m1_t __riscv_vctz_v_u64m1_tu (vuint64m1_t vd, vuint64m1_t vs2, size_t vl); +vuint64m2_t __riscv_vctz_v_u64m2_tu (vuint64m2_t vd, vuint64m2_t vs2, size_t vl); +vuint64m4_t 
__riscv_vctz_v_u64m4_tu (vuint64m4_t vd, vuint64m4_t vs2, size_t vl); +vuint64m8_t __riscv_vctz_v_u64m8_tu (vuint64m8_t vd, vuint64m8_t vs2, size_t vl); +// masked functions +vuint8mf8_t __riscv_vclz_v_u8mf8_tum (vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, size_t vl); +vuint8mf4_t __riscv_vclz_v_u8mf4_tum (vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, size_t vl); +vuint8mf2_t __riscv_vclz_v_u8mf2_tum (vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, size_t vl); +vuint8m1_t __riscv_vclz_v_u8m1_tum (vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, size_t vl); +vuint8m2_t __riscv_vclz_v_u8m2_tum (vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, size_t vl); +vuint8m4_t __riscv_vclz_v_u8m4_tum (vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, size_t vl); +vuint8m8_t __riscv_vclz_v_u8m8_tum (vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, size_t vl); +vuint16mf4_t __riscv_vclz_v_u16mf4_tum (vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, size_t vl); +vuint16mf2_t __riscv_vclz_v_u16mf2_tum (vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, size_t vl); +vuint16m1_t __riscv_vclz_v_u16m1_tum (vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, size_t vl); +vuint16m2_t __riscv_vclz_v_u16m2_tum (vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, size_t vl); +vuint16m4_t __riscv_vclz_v_u16m4_tum (vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, size_t vl); +vuint16m8_t __riscv_vclz_v_u16m8_tum (vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, size_t vl); +vuint32mf2_t __riscv_vclz_v_u32mf2_tum (vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vclz_v_u32m1_tum (vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vclz_v_u32m2_tum (vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vclz_v_u32m4_tum (vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vclz_v_u32m8_tum (vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, size_t vl); +vuint64m1_t __riscv_vclz_v_u64m1_tum (vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, size_t vl); +vuint64m2_t __riscv_vclz_v_u64m2_tum (vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, size_t vl); +vuint64m4_t __riscv_vclz_v_u64m4_tum (vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, size_t vl); +vuint64m8_t __riscv_vclz_v_u64m8_tum (vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, size_t vl); +vuint8mf8_t __riscv_vctz_v_u8mf8_tum (vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, size_t vl); +vuint8mf4_t __riscv_vctz_v_u8mf4_tum (vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, size_t vl); +vuint8mf2_t __riscv_vctz_v_u8mf2_tum (vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, size_t vl); +vuint8m1_t __riscv_vctz_v_u8m1_tum (vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, size_t vl); +vuint8m2_t __riscv_vctz_v_u8m2_tum (vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, size_t vl); +vuint8m4_t __riscv_vctz_v_u8m4_tum (vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, size_t vl); +vuint8m8_t __riscv_vctz_v_u8m8_tum (vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, size_t vl); +vuint16mf4_t __riscv_vctz_v_u16mf4_tum (vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, size_t vl); +vuint16mf2_t __riscv_vctz_v_u16mf2_tum (vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, size_t vl); +vuint16m1_t __riscv_vctz_v_u16m1_tum (vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, size_t vl); +vuint16m2_t __riscv_vctz_v_u16m2_tum (vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, size_t vl); +vuint16m4_t __riscv_vctz_v_u16m4_tum (vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, size_t vl); +vuint16m8_t __riscv_vctz_v_u16m8_tum (vbool2_t vm, vuint16m8_t vd, 
vuint16m8_t vs2, size_t vl); +vuint32mf2_t __riscv_vctz_v_u32mf2_tum (vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vctz_v_u32m1_tum (vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vctz_v_u32m2_tum (vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vctz_v_u32m4_tum (vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vctz_v_u32m8_tum (vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, size_t vl); +vuint64m1_t __riscv_vctz_v_u64m1_tum (vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, size_t vl); +vuint64m2_t __riscv_vctz_v_u64m2_tum (vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, size_t vl); +vuint64m4_t __riscv_vctz_v_u64m4_tum (vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, size_t vl); +vuint64m8_t __riscv_vctz_v_u64m8_tum (vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, size_t vl); +// masked functions +vuint8mf8_t __riscv_vclz_v_u8mf8_tumu (vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, size_t vl); +vuint8mf4_t __riscv_vclz_v_u8mf4_tumu (vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, size_t vl); +vuint8mf2_t __riscv_vclz_v_u8mf2_tumu (vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, size_t vl); +vuint8m1_t __riscv_vclz_v_u8m1_tumu (vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, size_t vl); +vuint8m2_t __riscv_vclz_v_u8m2_tumu (vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, size_t vl); +vuint8m4_t __riscv_vclz_v_u8m4_tumu (vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, size_t vl); +vuint8m8_t __riscv_vclz_v_u8m8_tumu (vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, size_t vl); +vuint16mf4_t __riscv_vclz_v_u16mf4_tumu (vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, size_t vl); +vuint16mf2_t __riscv_vclz_v_u16mf2_tumu (vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, size_t vl); +vuint16m1_t __riscv_vclz_v_u16m1_tumu (vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, size_t vl); +vuint16m2_t __riscv_vclz_v_u16m2_tumu (vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, size_t vl); +vuint16m4_t __riscv_vclz_v_u16m4_tumu (vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, size_t vl); +vuint16m8_t __riscv_vclz_v_u16m8_tumu (vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, size_t vl); +vuint32mf2_t __riscv_vclz_v_u32mf2_tumu (vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vclz_v_u32m1_tumu (vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vclz_v_u32m2_tumu (vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vclz_v_u32m4_tumu (vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vclz_v_u32m8_tumu (vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, size_t vl); +vuint64m1_t __riscv_vclz_v_u64m1_tumu (vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, size_t vl); +vuint64m2_t __riscv_vclz_v_u64m2_tumu (vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, size_t vl); +vuint64m4_t __riscv_vclz_v_u64m4_tumu (vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, size_t vl); +vuint64m8_t __riscv_vclz_v_u64m8_tumu (vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, size_t vl); +vuint8mf8_t __riscv_vctz_v_u8mf8_tumu (vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, size_t vl); +vuint8mf4_t __riscv_vctz_v_u8mf4_tumu (vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, size_t vl); +vuint8mf2_t __riscv_vctz_v_u8mf2_tumu (vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, size_t vl); +vuint8m1_t __riscv_vctz_v_u8m1_tumu (vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, size_t vl); +vuint8m2_t __riscv_vctz_v_u8m2_tumu (vbool4_t vm, vuint8m2_t vd, 
vuint8m2_t vs2, size_t vl); +vuint8m4_t __riscv_vctz_v_u8m4_tumu (vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, size_t vl); +vuint8m8_t __riscv_vctz_v_u8m8_tumu (vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, size_t vl); +vuint16mf4_t __riscv_vctz_v_u16mf4_tumu (vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, size_t vl); +vuint16mf2_t __riscv_vctz_v_u16mf2_tumu (vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, size_t vl); +vuint16m1_t __riscv_vctz_v_u16m1_tumu (vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, size_t vl); +vuint16m2_t __riscv_vctz_v_u16m2_tumu (vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, size_t vl); +vuint16m4_t __riscv_vctz_v_u16m4_tumu (vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, size_t vl); +vuint16m8_t __riscv_vctz_v_u16m8_tumu (vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, size_t vl); +vuint32mf2_t __riscv_vctz_v_u32mf2_tumu (vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vctz_v_u32m1_tumu (vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vctz_v_u32m2_tumu (vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vctz_v_u32m4_tumu (vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vctz_v_u32m8_tumu (vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, size_t vl); +vuint64m1_t __riscv_vctz_v_u64m1_tumu (vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, size_t vl); +vuint64m2_t __riscv_vctz_v_u64m2_tumu (vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, size_t vl); +vuint64m4_t __riscv_vctz_v_u64m4_tumu (vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, size_t vl); +vuint64m8_t __riscv_vctz_v_u64m8_tumu (vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, size_t vl); +// masked functions +vuint8mf8_t __riscv_vclz_v_u8mf8_mu (vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, size_t vl); +vuint8mf4_t __riscv_vclz_v_u8mf4_mu (vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, size_t vl); +vuint8mf2_t __riscv_vclz_v_u8mf2_mu (vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, size_t vl); +vuint8m1_t __riscv_vclz_v_u8m1_mu (vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, size_t vl); +vuint8m2_t __riscv_vclz_v_u8m2_mu (vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, size_t vl); +vuint8m4_t __riscv_vclz_v_u8m4_mu (vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, size_t vl); +vuint8m8_t __riscv_vclz_v_u8m8_mu (vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, size_t vl); +vuint16mf4_t __riscv_vclz_v_u16mf4_mu (vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, size_t vl); +vuint16mf2_t __riscv_vclz_v_u16mf2_mu (vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, size_t vl); +vuint16m1_t __riscv_vclz_v_u16m1_mu (vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, size_t vl); +vuint16m2_t __riscv_vclz_v_u16m2_mu (vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, size_t vl); +vuint16m4_t __riscv_vclz_v_u16m4_mu (vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, size_t vl); +vuint16m8_t __riscv_vclz_v_u16m8_mu (vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, size_t vl); +vuint32mf2_t __riscv_vclz_v_u32mf2_mu (vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vclz_v_u32m1_mu (vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vclz_v_u32m2_mu (vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vclz_v_u32m4_mu (vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vclz_v_u32m8_mu (vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, size_t vl); +vuint64m1_t __riscv_vclz_v_u64m1_mu (vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, size_t 
vl); +vuint64m2_t __riscv_vclz_v_u64m2_mu (vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, size_t vl); +vuint64m4_t __riscv_vclz_v_u64m4_mu (vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, size_t vl); +vuint64m8_t __riscv_vclz_v_u64m8_mu (vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, size_t vl); +vuint8mf8_t __riscv_vctz_v_u8mf8_mu (vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, size_t vl); +vuint8mf4_t __riscv_vctz_v_u8mf4_mu (vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, size_t vl); +vuint8mf2_t __riscv_vctz_v_u8mf2_mu (vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, size_t vl); +vuint8m1_t __riscv_vctz_v_u8m1_mu (vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, size_t vl); +vuint8m2_t __riscv_vctz_v_u8m2_mu (vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, size_t vl); +vuint8m4_t __riscv_vctz_v_u8m4_mu (vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, size_t vl); +vuint8m8_t __riscv_vctz_v_u8m8_mu (vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, size_t vl); +vuint16mf4_t __riscv_vctz_v_u16mf4_mu (vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, size_t vl); +vuint16mf2_t __riscv_vctz_v_u16mf2_mu (vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, size_t vl); +vuint16m1_t __riscv_vctz_v_u16m1_mu (vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, size_t vl); +vuint16m2_t __riscv_vctz_v_u16m2_mu (vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, size_t vl); +vuint16m4_t __riscv_vctz_v_u16m4_mu (vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, size_t vl); +vuint16m8_t __riscv_vctz_v_u16m8_mu (vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, size_t vl); +vuint32mf2_t __riscv_vctz_v_u32mf2_mu (vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vctz_v_u32m1_mu (vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vctz_v_u32m2_mu (vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vctz_v_u32m4_mu (vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vctz_v_u32m8_mu (vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, size_t vl); +vuint64m1_t __riscv_vctz_v_u64m1_mu (vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, size_t vl); +vuint64m2_t __riscv_vctz_v_u64m2_mu (vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, size_t vl); +vuint64m4_t __riscv_vctz_v_u64m4_mu (vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, size_t vl); +vuint64m8_t __riscv_vctz_v_u64m8_mu (vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, size_t vl); +---- + +[[policy-variant-]] +==== Vector Basic Bit-manipulation - Vector Population Count + +[,c] +---- +vuint8mf8_t __riscv_vcpop_v_u8mf8_tu (vuint8mf8_t vd, vuint8mf8_t vs2, size_t vl); +vuint8mf4_t __riscv_vcpop_v_u8mf4_tu (vuint8mf4_t vd, vuint8mf4_t vs2, size_t vl); +vuint8mf2_t __riscv_vcpop_v_u8mf2_tu (vuint8mf2_t vd, vuint8mf2_t vs2, size_t vl); +vuint8m1_t __riscv_vcpop_v_u8m1_tu (vuint8m1_t vd, vuint8m1_t vs2, size_t vl); +vuint8m2_t __riscv_vcpop_v_u8m2_tu (vuint8m2_t vd, vuint8m2_t vs2, size_t vl); +vuint8m4_t __riscv_vcpop_v_u8m4_tu (vuint8m4_t vd, vuint8m4_t vs2, size_t vl); +vuint8m8_t __riscv_vcpop_v_u8m8_tu (vuint8m8_t vd, vuint8m8_t vs2, size_t vl); +vuint16mf4_t __riscv_vcpop_v_u16mf4_tu (vuint16mf4_t vd, vuint16mf4_t vs2, size_t vl); +vuint16mf2_t __riscv_vcpop_v_u16mf2_tu (vuint16mf2_t vd, vuint16mf2_t vs2, size_t vl); +vuint16m1_t __riscv_vcpop_v_u16m1_tu (vuint16m1_t vd, vuint16m1_t vs2, size_t vl); +vuint16m2_t __riscv_vcpop_v_u16m2_tu (vuint16m2_t vd, vuint16m2_t vs2, size_t vl); +vuint16m4_t __riscv_vcpop_v_u16m4_tu (vuint16m4_t vd, vuint16m4_t vs2, size_t vl); +vuint16m8_t 
__riscv_vcpop_v_u16m8_tu (vuint16m8_t vd, vuint16m8_t vs2, size_t vl); +vuint32mf2_t __riscv_vcpop_v_u32mf2_tu (vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vcpop_v_u32m1_tu (vuint32m1_t vd, vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vcpop_v_u32m2_tu (vuint32m2_t vd, vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vcpop_v_u32m4_tu (vuint32m4_t vd, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vcpop_v_u32m8_tu (vuint32m8_t vd, vuint32m8_t vs2, size_t vl); +vuint64m1_t __riscv_vcpop_v_u64m1_tu (vuint64m1_t vd, vuint64m1_t vs2, size_t vl); +vuint64m2_t __riscv_vcpop_v_u64m2_tu (vuint64m2_t vd, vuint64m2_t vs2, size_t vl); +vuint64m4_t __riscv_vcpop_v_u64m4_tu (vuint64m4_t vd, vuint64m4_t vs2, size_t vl); +vuint64m8_t __riscv_vcpop_v_u64m8_tu (vuint64m8_t vd, vuint64m8_t vs2, size_t vl); +// masked functions +vuint8mf8_t __riscv_vcpop_v_u8mf8_tum (vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, size_t vl); +vuint8mf4_t __riscv_vcpop_v_u8mf4_tum (vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, size_t vl); +vuint8mf2_t __riscv_vcpop_v_u8mf2_tum (vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, size_t vl); +vuint8m1_t __riscv_vcpop_v_u8m1_tum (vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, size_t vl); +vuint8m2_t __riscv_vcpop_v_u8m2_tum (vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, size_t vl); +vuint8m4_t __riscv_vcpop_v_u8m4_tum (vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, size_t vl); +vuint8m8_t __riscv_vcpop_v_u8m8_tum (vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, size_t vl); +vuint16mf4_t __riscv_vcpop_v_u16mf4_tum (vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, size_t vl); +vuint16mf2_t __riscv_vcpop_v_u16mf2_tum (vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, size_t vl); +vuint16m1_t __riscv_vcpop_v_u16m1_tum (vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, size_t vl); +vuint16m2_t __riscv_vcpop_v_u16m2_tum (vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, size_t vl); +vuint16m4_t __riscv_vcpop_v_u16m4_tum (vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, size_t vl); +vuint16m8_t __riscv_vcpop_v_u16m8_tum (vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, size_t vl); +vuint32mf2_t __riscv_vcpop_v_u32mf2_tum (vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vcpop_v_u32m1_tum (vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vcpop_v_u32m2_tum (vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vcpop_v_u32m4_tum (vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vcpop_v_u32m8_tum (vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, size_t vl); +vuint64m1_t __riscv_vcpop_v_u64m1_tum (vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, size_t vl); +vuint64m2_t __riscv_vcpop_v_u64m2_tum (vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, size_t vl); +vuint64m4_t __riscv_vcpop_v_u64m4_tum (vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, size_t vl); +vuint64m8_t __riscv_vcpop_v_u64m8_tum (vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, size_t vl); +// masked functions +vuint8mf8_t __riscv_vcpop_v_u8mf8_tumu (vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, size_t vl); +vuint8mf4_t __riscv_vcpop_v_u8mf4_tumu (vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, size_t vl); +vuint8mf2_t __riscv_vcpop_v_u8mf2_tumu (vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, size_t vl); +vuint8m1_t __riscv_vcpop_v_u8m1_tumu (vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, size_t vl); +vuint8m2_t __riscv_vcpop_v_u8m2_tumu (vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, size_t vl); +vuint8m4_t 
__riscv_vcpop_v_u8m4_tumu (vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, size_t vl); +vuint8m8_t __riscv_vcpop_v_u8m8_tumu (vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, size_t vl); +vuint16mf4_t __riscv_vcpop_v_u16mf4_tumu (vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, size_t vl); +vuint16mf2_t __riscv_vcpop_v_u16mf2_tumu (vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, size_t vl); +vuint16m1_t __riscv_vcpop_v_u16m1_tumu (vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, size_t vl); +vuint16m2_t __riscv_vcpop_v_u16m2_tumu (vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, size_t vl); +vuint16m4_t __riscv_vcpop_v_u16m4_tumu (vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, size_t vl); +vuint16m8_t __riscv_vcpop_v_u16m8_tumu (vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, size_t vl); +vuint32mf2_t __riscv_vcpop_v_u32mf2_tumu (vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vcpop_v_u32m1_tumu (vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vcpop_v_u32m2_tumu (vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vcpop_v_u32m4_tumu (vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vcpop_v_u32m8_tumu (vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, size_t vl); +vuint64m1_t __riscv_vcpop_v_u64m1_tumu (vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, size_t vl); +vuint64m2_t __riscv_vcpop_v_u64m2_tumu (vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, size_t vl); +vuint64m4_t __riscv_vcpop_v_u64m4_tumu (vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, size_t vl); +vuint64m8_t __riscv_vcpop_v_u64m8_tumu (vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, size_t vl); +// masked functions +vuint8mf8_t __riscv_vcpop_v_u8mf8_mu (vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, size_t vl); +vuint8mf4_t __riscv_vcpop_v_u8mf4_mu (vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, size_t vl); +vuint8mf2_t __riscv_vcpop_v_u8mf2_mu (vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, size_t vl); +vuint8m1_t __riscv_vcpop_v_u8m1_mu (vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, size_t vl); +vuint8m2_t __riscv_vcpop_v_u8m2_mu (vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, size_t vl); +vuint8m4_t __riscv_vcpop_v_u8m4_mu (vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, size_t vl); +vuint8m8_t __riscv_vcpop_v_u8m8_mu (vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, size_t vl); +vuint16mf4_t __riscv_vcpop_v_u16mf4_mu (vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, size_t vl); +vuint16mf2_t __riscv_vcpop_v_u16mf2_mu (vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, size_t vl); +vuint16m1_t __riscv_vcpop_v_u16m1_mu (vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, size_t vl); +vuint16m2_t __riscv_vcpop_v_u16m2_mu (vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, size_t vl); +vuint16m4_t __riscv_vcpop_v_u16m4_mu (vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, size_t vl); +vuint16m8_t __riscv_vcpop_v_u16m8_mu (vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, size_t vl); +vuint32mf2_t __riscv_vcpop_v_u32mf2_mu (vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vcpop_v_u32m1_mu (vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vcpop_v_u32m2_mu (vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vcpop_v_u32m4_mu (vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vcpop_v_u32m8_mu (vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, size_t vl); +vuint64m1_t __riscv_vcpop_v_u64m1_mu (vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, size_t vl); 
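+// A minimal usage sketch, assuming <riscv_vector.h> and the Zvbb extension
+// (the wrapper name is illustrative, not part of the intrinsic API): under the
+// "mu" (mask-undisturbed) policy, inactive elements keep the value already
+// held in vd, while active elements receive the per-element population count
+// (number of set bits) of vs2; tail elements are handled tail-agnostic.
+static inline vuint8m1_t popcount_active_u8m1(vbool8_t vm, vuint8m1_t vd,
+                                              vuint8m1_t vs2, size_t vl) {
+  return __riscv_vcpop_v_u8m1_mu(vm, vd, vs2, vl);
+}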
+vuint64m2_t __riscv_vcpop_v_u64m2_mu (vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, size_t vl); +vuint64m4_t __riscv_vcpop_v_u64m4_mu (vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, size_t vl); +vuint64m8_t __riscv_vcpop_v_u64m8_mu (vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, size_t vl); +---- + +[[policy-variant-]] +==== Vector Bit-manipulation used in Cryptography - Rotate + +[,c] +---- +vuint8mf8_t __riscv_vrol_vv_u8mf8_tu (vuint8mf8_t vd, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl); +vuint8mf8_t __riscv_vrol_vx_u8mf8_tu (vuint8mf8_t vd, vuint8mf8_t vs2, size_t rs1, size_t vl); +vuint8mf4_t __riscv_vrol_vv_u8mf4_tu (vuint8mf4_t vd, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl); +vuint8mf4_t __riscv_vrol_vx_u8mf4_tu (vuint8mf4_t vd, vuint8mf4_t vs2, size_t rs1, size_t vl); +vuint8mf2_t __riscv_vrol_vv_u8mf2_tu (vuint8mf2_t vd, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl); +vuint8mf2_t __riscv_vrol_vx_u8mf2_tu (vuint8mf2_t vd, vuint8mf2_t vs2, size_t rs1, size_t vl); +vuint8m1_t __riscv_vrol_vv_u8m1_tu (vuint8m1_t vd, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl); +vuint8m1_t __riscv_vrol_vx_u8m1_tu (vuint8m1_t vd, vuint8m1_t vs2, size_t rs1, size_t vl); +vuint8m2_t __riscv_vrol_vv_u8m2_tu (vuint8m2_t vd, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl); +vuint8m2_t __riscv_vrol_vx_u8m2_tu (vuint8m2_t vd, vuint8m2_t vs2, size_t rs1, size_t vl); +vuint8m4_t __riscv_vrol_vv_u8m4_tu (vuint8m4_t vd, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl); +vuint8m4_t __riscv_vrol_vx_u8m4_tu (vuint8m4_t vd, vuint8m4_t vs2, size_t rs1, size_t vl); +vuint8m8_t __riscv_vrol_vv_u8m8_tu (vuint8m8_t vd, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl); +vuint8m8_t __riscv_vrol_vx_u8m8_tu (vuint8m8_t vd, vuint8m8_t vs2, size_t rs1, size_t vl); +vuint16mf4_t __riscv_vrol_vv_u16mf4_tu (vuint16mf4_t vd, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl); +vuint16mf4_t __riscv_vrol_vx_u16mf4_tu (vuint16mf4_t vd, vuint16mf4_t vs2, size_t rs1, size_t vl); +vuint16mf2_t __riscv_vrol_vv_u16mf2_tu (vuint16mf2_t vd, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl); +vuint16mf2_t __riscv_vrol_vx_u16mf2_tu (vuint16mf2_t vd, vuint16mf2_t vs2, size_t rs1, size_t vl); +vuint16m1_t __riscv_vrol_vv_u16m1_tu (vuint16m1_t vd, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl); +vuint16m1_t __riscv_vrol_vx_u16m1_tu (vuint16m1_t vd, vuint16m1_t vs2, size_t rs1, size_t vl); +vuint16m2_t __riscv_vrol_vv_u16m2_tu (vuint16m2_t vd, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl); +vuint16m2_t __riscv_vrol_vx_u16m2_tu (vuint16m2_t vd, vuint16m2_t vs2, size_t rs1, size_t vl); +vuint16m4_t __riscv_vrol_vv_u16m4_tu (vuint16m4_t vd, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl); +vuint16m4_t __riscv_vrol_vx_u16m4_tu (vuint16m4_t vd, vuint16m4_t vs2, size_t rs1, size_t vl); +vuint16m8_t __riscv_vrol_vv_u16m8_tu (vuint16m8_t vd, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl); +vuint16m8_t __riscv_vrol_vx_u16m8_tu (vuint16m8_t vd, vuint16m8_t vs2, size_t rs1, size_t vl); +vuint32mf2_t __riscv_vrol_vv_u32mf2_tu (vuint32mf2_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl); +vuint32mf2_t __riscv_vrol_vx_u32mf2_tu (vuint32mf2_t vd, vuint32mf2_t vs2, size_t rs1, size_t vl); +vuint32m1_t __riscv_vrol_vv_u32m1_tu (vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl); +vuint32m1_t __riscv_vrol_vx_u32m1_tu (vuint32m1_t vd, vuint32m1_t vs2, size_t rs1, size_t vl); +vuint32m2_t __riscv_vrol_vv_u32m2_tu (vuint32m2_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl); +vuint32m2_t __riscv_vrol_vx_u32m2_tu (vuint32m2_t vd, vuint32m2_t vs2, size_t rs1, size_t vl); +vuint32m4_t 
__riscv_vrol_vv_u32m4_tu (vuint32m4_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl); +vuint32m4_t __riscv_vrol_vx_u32m4_tu (vuint32m4_t vd, vuint32m4_t vs2, size_t rs1, size_t vl); +vuint32m8_t __riscv_vrol_vv_u32m8_tu (vuint32m8_t vd, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl); +vuint32m8_t __riscv_vrol_vx_u32m8_tu (vuint32m8_t vd, vuint32m8_t vs2, size_t rs1, size_t vl); +vuint64m1_t __riscv_vrol_vv_u64m1_tu (vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl); +vuint64m1_t __riscv_vrol_vx_u64m1_tu (vuint64m1_t vd, vuint64m1_t vs2, size_t rs1, size_t vl); +vuint64m2_t __riscv_vrol_vv_u64m2_tu (vuint64m2_t vd, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl); +vuint64m2_t __riscv_vrol_vx_u64m2_tu (vuint64m2_t vd, vuint64m2_t vs2, size_t rs1, size_t vl); +vuint64m4_t __riscv_vrol_vv_u64m4_tu (vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl); +vuint64m4_t __riscv_vrol_vx_u64m4_tu (vuint64m4_t vd, vuint64m4_t vs2, size_t rs1, size_t vl); +vuint64m8_t __riscv_vrol_vv_u64m8_tu (vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl); +vuint64m8_t __riscv_vrol_vx_u64m8_tu (vuint64m8_t vd, vuint64m8_t vs2, size_t rs1, size_t vl); +vuint8mf8_t __riscv_vror_vv_u8mf8_tu (vuint8mf8_t vd, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl); +vuint8mf8_t __riscv_vror_vx_u8mf8_tu (vuint8mf8_t vd, vuint8mf8_t vs2, size_t rs1, size_t vl); +vuint8mf4_t __riscv_vror_vv_u8mf4_tu (vuint8mf4_t vd, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl); +vuint8mf4_t __riscv_vror_vx_u8mf4_tu (vuint8mf4_t vd, vuint8mf4_t vs2, size_t rs1, size_t vl); +vuint8mf2_t __riscv_vror_vv_u8mf2_tu (vuint8mf2_t vd, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl); +vuint8mf2_t __riscv_vror_vx_u8mf2_tu (vuint8mf2_t vd, vuint8mf2_t vs2, size_t rs1, size_t vl); +vuint8m1_t __riscv_vror_vv_u8m1_tu (vuint8m1_t vd, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl); +vuint8m1_t __riscv_vror_vx_u8m1_tu (vuint8m1_t vd, vuint8m1_t vs2, size_t rs1, size_t vl); +vuint8m2_t __riscv_vror_vv_u8m2_tu (vuint8m2_t vd, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl); +vuint8m2_t __riscv_vror_vx_u8m2_tu (vuint8m2_t vd, vuint8m2_t vs2, size_t rs1, size_t vl); +vuint8m4_t __riscv_vror_vv_u8m4_tu (vuint8m4_t vd, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl); +vuint8m4_t __riscv_vror_vx_u8m4_tu (vuint8m4_t vd, vuint8m4_t vs2, size_t rs1, size_t vl); +vuint8m8_t __riscv_vror_vv_u8m8_tu (vuint8m8_t vd, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl); +vuint8m8_t __riscv_vror_vx_u8m8_tu (vuint8m8_t vd, vuint8m8_t vs2, size_t rs1, size_t vl); +vuint16mf4_t __riscv_vror_vv_u16mf4_tu (vuint16mf4_t vd, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl); +vuint16mf4_t __riscv_vror_vx_u16mf4_tu (vuint16mf4_t vd, vuint16mf4_t vs2, size_t rs1, size_t vl); +vuint16mf2_t __riscv_vror_vv_u16mf2_tu (vuint16mf2_t vd, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl); +vuint16mf2_t __riscv_vror_vx_u16mf2_tu (vuint16mf2_t vd, vuint16mf2_t vs2, size_t rs1, size_t vl); +vuint16m1_t __riscv_vror_vv_u16m1_tu (vuint16m1_t vd, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl); +vuint16m1_t __riscv_vror_vx_u16m1_tu (vuint16m1_t vd, vuint16m1_t vs2, size_t rs1, size_t vl); +vuint16m2_t __riscv_vror_vv_u16m2_tu (vuint16m2_t vd, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl); +vuint16m2_t __riscv_vror_vx_u16m2_tu (vuint16m2_t vd, vuint16m2_t vs2, size_t rs1, size_t vl); +vuint16m4_t __riscv_vror_vv_u16m4_tu (vuint16m4_t vd, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl); +vuint16m4_t __riscv_vror_vx_u16m4_tu (vuint16m4_t vd, vuint16m4_t vs2, size_t rs1, size_t vl); +vuint16m8_t __riscv_vror_vv_u16m8_tu 
(vuint16m8_t vd, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl); +vuint16m8_t __riscv_vror_vx_u16m8_tu (vuint16m8_t vd, vuint16m8_t vs2, size_t rs1, size_t vl); +vuint32mf2_t __riscv_vror_vv_u32mf2_tu (vuint32mf2_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl); +vuint32mf2_t __riscv_vror_vx_u32mf2_tu (vuint32mf2_t vd, vuint32mf2_t vs2, size_t rs1, size_t vl); +vuint32m1_t __riscv_vror_vv_u32m1_tu (vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl); +vuint32m1_t __riscv_vror_vx_u32m1_tu (vuint32m1_t vd, vuint32m1_t vs2, size_t rs1, size_t vl); +vuint32m2_t __riscv_vror_vv_u32m2_tu (vuint32m2_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl); +vuint32m2_t __riscv_vror_vx_u32m2_tu (vuint32m2_t vd, vuint32m2_t vs2, size_t rs1, size_t vl); +vuint32m4_t __riscv_vror_vv_u32m4_tu (vuint32m4_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl); +vuint32m4_t __riscv_vror_vx_u32m4_tu (vuint32m4_t vd, vuint32m4_t vs2, size_t rs1, size_t vl); +vuint32m8_t __riscv_vror_vv_u32m8_tu (vuint32m8_t vd, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl); +vuint32m8_t __riscv_vror_vx_u32m8_tu (vuint32m8_t vd, vuint32m8_t vs2, size_t rs1, size_t vl); +vuint64m1_t __riscv_vror_vv_u64m1_tu (vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl); +vuint64m1_t __riscv_vror_vx_u64m1_tu (vuint64m1_t vd, vuint64m1_t vs2, size_t rs1, size_t vl); +vuint64m2_t __riscv_vror_vv_u64m2_tu (vuint64m2_t vd, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl); +vuint64m2_t __riscv_vror_vx_u64m2_tu (vuint64m2_t vd, vuint64m2_t vs2, size_t rs1, size_t vl); +vuint64m4_t __riscv_vror_vv_u64m4_tu (vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl); +vuint64m4_t __riscv_vror_vx_u64m4_tu (vuint64m4_t vd, vuint64m4_t vs2, size_t rs1, size_t vl); +vuint64m8_t __riscv_vror_vv_u64m8_tu (vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl); +vuint64m8_t __riscv_vror_vx_u64m8_tu (vuint64m8_t vd, vuint64m8_t vs2, size_t rs1, size_t vl); +// masked functions +vuint8mf8_t __riscv_vrol_vv_u8mf8_tum (vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl); +vuint8mf8_t __riscv_vrol_vx_u8mf8_tum (vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, size_t rs1, size_t vl); +vuint8mf4_t __riscv_vrol_vv_u8mf4_tum (vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl); +vuint8mf4_t __riscv_vrol_vx_u8mf4_tum (vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, size_t rs1, size_t vl); +vuint8mf2_t __riscv_vrol_vv_u8mf2_tum (vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl); +vuint8mf2_t __riscv_vrol_vx_u8mf2_tum (vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, size_t rs1, size_t vl); +vuint8m1_t __riscv_vrol_vv_u8m1_tum (vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl); +vuint8m1_t __riscv_vrol_vx_u8m1_tum (vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, size_t rs1, size_t vl); +vuint8m2_t __riscv_vrol_vv_u8m2_tum (vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl); +vuint8m2_t __riscv_vrol_vx_u8m2_tum (vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, size_t rs1, size_t vl); +vuint8m4_t __riscv_vrol_vv_u8m4_tum (vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl); +vuint8m4_t __riscv_vrol_vx_u8m4_tum (vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, size_t rs1, size_t vl); +vuint8m8_t __riscv_vrol_vv_u8m8_tum (vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl); +vuint8m8_t __riscv_vrol_vx_u8m8_tum (vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, size_t rs1, size_t vl); +vuint16mf4_t __riscv_vrol_vv_u16mf4_tum 
(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl); +vuint16mf4_t __riscv_vrol_vx_u16mf4_tum (vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, size_t rs1, size_t vl); +vuint16mf2_t __riscv_vrol_vv_u16mf2_tum (vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl); +vuint16mf2_t __riscv_vrol_vx_u16mf2_tum (vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, size_t rs1, size_t vl); +vuint16m1_t __riscv_vrol_vv_u16m1_tum (vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl); +vuint16m1_t __riscv_vrol_vx_u16m1_tum (vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, size_t rs1, size_t vl); +vuint16m2_t __riscv_vrol_vv_u16m2_tum (vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl); +vuint16m2_t __riscv_vrol_vx_u16m2_tum (vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, size_t rs1, size_t vl); +vuint16m4_t __riscv_vrol_vv_u16m4_tum (vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl); +vuint16m4_t __riscv_vrol_vx_u16m4_tum (vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, size_t rs1, size_t vl); +vuint16m8_t __riscv_vrol_vv_u16m8_tum (vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl); +vuint16m8_t __riscv_vrol_vx_u16m8_tum (vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, size_t rs1, size_t vl); +vuint32mf2_t __riscv_vrol_vv_u32mf2_tum (vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl); +vuint32mf2_t __riscv_vrol_vx_u32mf2_tum (vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, size_t rs1, size_t vl); +vuint32m1_t __riscv_vrol_vv_u32m1_tum (vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl); +vuint32m1_t __riscv_vrol_vx_u32m1_tum (vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, size_t rs1, size_t vl); +vuint32m2_t __riscv_vrol_vv_u32m2_tum (vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl); +vuint32m2_t __riscv_vrol_vx_u32m2_tum (vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, size_t rs1, size_t vl); +vuint32m4_t __riscv_vrol_vv_u32m4_tum (vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl); +vuint32m4_t __riscv_vrol_vx_u32m4_tum (vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, size_t rs1, size_t vl); +vuint32m8_t __riscv_vrol_vv_u32m8_tum (vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl); +vuint32m8_t __riscv_vrol_vx_u32m8_tum (vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, size_t rs1, size_t vl); +vuint64m1_t __riscv_vrol_vv_u64m1_tum (vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl); +vuint64m1_t __riscv_vrol_vx_u64m1_tum (vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, size_t rs1, size_t vl); +vuint64m2_t __riscv_vrol_vv_u64m2_tum (vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl); +vuint64m2_t __riscv_vrol_vx_u64m2_tum (vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, size_t rs1, size_t vl); +vuint64m4_t __riscv_vrol_vv_u64m4_tum (vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl); +vuint64m4_t __riscv_vrol_vx_u64m4_tum (vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, size_t rs1, size_t vl); +vuint64m8_t __riscv_vrol_vv_u64m8_tum (vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl); +vuint64m8_t __riscv_vrol_vx_u64m8_tum (vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, size_t rs1, size_t vl); +vuint8mf8_t __riscv_vror_vv_u8mf8_tum (vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl); +vuint8mf8_t __riscv_vror_vx_u8mf8_tum (vbool64_t vm, 
vuint8mf8_t vd, vuint8mf8_t vs2, size_t rs1, size_t vl); +vuint8mf4_t __riscv_vror_vv_u8mf4_tum (vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl); +vuint8mf4_t __riscv_vror_vx_u8mf4_tum (vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, size_t rs1, size_t vl); +vuint8mf2_t __riscv_vror_vv_u8mf2_tum (vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl); +vuint8mf2_t __riscv_vror_vx_u8mf2_tum (vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, size_t rs1, size_t vl); +vuint8m1_t __riscv_vror_vv_u8m1_tum (vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl); +vuint8m1_t __riscv_vror_vx_u8m1_tum (vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, size_t rs1, size_t vl); +vuint8m2_t __riscv_vror_vv_u8m2_tum (vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl); +vuint8m2_t __riscv_vror_vx_u8m2_tum (vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, size_t rs1, size_t vl); +vuint8m4_t __riscv_vror_vv_u8m4_tum (vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl); +vuint8m4_t __riscv_vror_vx_u8m4_tum (vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, size_t rs1, size_t vl); +vuint8m8_t __riscv_vror_vv_u8m8_tum (vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl); +vuint8m8_t __riscv_vror_vx_u8m8_tum (vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, size_t rs1, size_t vl); +vuint16mf4_t __riscv_vror_vv_u16mf4_tum (vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl); +vuint16mf4_t __riscv_vror_vx_u16mf4_tum (vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, size_t rs1, size_t vl); +vuint16mf2_t __riscv_vror_vv_u16mf2_tum (vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl); +vuint16mf2_t __riscv_vror_vx_u16mf2_tum (vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, size_t rs1, size_t vl); +vuint16m1_t __riscv_vror_vv_u16m1_tum (vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl); +vuint16m1_t __riscv_vror_vx_u16m1_tum (vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, size_t rs1, size_t vl); +vuint16m2_t __riscv_vror_vv_u16m2_tum (vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl); +vuint16m2_t __riscv_vror_vx_u16m2_tum (vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, size_t rs1, size_t vl); +vuint16m4_t __riscv_vror_vv_u16m4_tum (vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl); +vuint16m4_t __riscv_vror_vx_u16m4_tum (vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, size_t rs1, size_t vl); +vuint16m8_t __riscv_vror_vv_u16m8_tum (vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl); +vuint16m8_t __riscv_vror_vx_u16m8_tum (vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, size_t rs1, size_t vl); +vuint32mf2_t __riscv_vror_vv_u32mf2_tum (vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl); +vuint32mf2_t __riscv_vror_vx_u32mf2_tum (vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, size_t rs1, size_t vl); +vuint32m1_t __riscv_vror_vv_u32m1_tum (vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl); +vuint32m1_t __riscv_vror_vx_u32m1_tum (vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, size_t rs1, size_t vl); +vuint32m2_t __riscv_vror_vv_u32m2_tum (vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl); +vuint32m2_t __riscv_vror_vx_u32m2_tum (vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, size_t rs1, size_t vl); +vuint32m4_t __riscv_vror_vv_u32m4_tum (vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl); 
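+// A minimal usage sketch, assuming <riscv_vector.h> and the Zvbb/Zvkb extension
+// (the wrapper name is illustrative, not part of the intrinsic API): under the
+// "tum" (tail-undisturbed, mask-agnostic) policy, tail elements keep the values
+// already in vd, masked-off elements are agnostic, and active elements receive
+// vs2 rotated right by the scalar amount rs1 (taken modulo the element width).
+static inline vuint32m1_t ror_active_u32m1(vbool32_t vm, vuint32m1_t vd,
+                                           vuint32m1_t vs2, size_t rs1,
+                                           size_t vl) {
+  return __riscv_vror_vx_u32m1_tum(vm, vd, vs2, rs1, vl);
+}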
+vuint32m4_t __riscv_vror_vx_u32m4_tum (vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, size_t rs1, size_t vl); +vuint32m8_t __riscv_vror_vv_u32m8_tum (vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl); +vuint32m8_t __riscv_vror_vx_u32m8_tum (vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, size_t rs1, size_t vl); +vuint64m1_t __riscv_vror_vv_u64m1_tum (vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl); +vuint64m1_t __riscv_vror_vx_u64m1_tum (vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, size_t rs1, size_t vl); +vuint64m2_t __riscv_vror_vv_u64m2_tum (vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl); +vuint64m2_t __riscv_vror_vx_u64m2_tum (vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, size_t rs1, size_t vl); +vuint64m4_t __riscv_vror_vv_u64m4_tum (vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl); +vuint64m4_t __riscv_vror_vx_u64m4_tum (vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, size_t rs1, size_t vl); +vuint64m8_t __riscv_vror_vv_u64m8_tum (vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl); +vuint64m8_t __riscv_vror_vx_u64m8_tum (vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, size_t rs1, size_t vl); +// masked functions +vuint8mf8_t __riscv_vrol_vv_u8mf8_tumu (vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl); +vuint8mf8_t __riscv_vrol_vx_u8mf8_tumu (vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, size_t rs1, size_t vl); +vuint8mf4_t __riscv_vrol_vv_u8mf4_tumu (vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl); +vuint8mf4_t __riscv_vrol_vx_u8mf4_tumu (vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, size_t rs1, size_t vl); +vuint8mf2_t __riscv_vrol_vv_u8mf2_tumu (vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl); +vuint8mf2_t __riscv_vrol_vx_u8mf2_tumu (vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, size_t rs1, size_t vl); +vuint8m1_t __riscv_vrol_vv_u8m1_tumu (vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl); +vuint8m1_t __riscv_vrol_vx_u8m1_tumu (vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, size_t rs1, size_t vl); +vuint8m2_t __riscv_vrol_vv_u8m2_tumu (vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl); +vuint8m2_t __riscv_vrol_vx_u8m2_tumu (vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, size_t rs1, size_t vl); +vuint8m4_t __riscv_vrol_vv_u8m4_tumu (vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl); +vuint8m4_t __riscv_vrol_vx_u8m4_tumu (vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, size_t rs1, size_t vl); +vuint8m8_t __riscv_vrol_vv_u8m8_tumu (vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl); +vuint8m8_t __riscv_vrol_vx_u8m8_tumu (vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, size_t rs1, size_t vl); +vuint16mf4_t __riscv_vrol_vv_u16mf4_tumu (vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl); +vuint16mf4_t __riscv_vrol_vx_u16mf4_tumu (vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, size_t rs1, size_t vl); +vuint16mf2_t __riscv_vrol_vv_u16mf2_tumu (vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl); +vuint16mf2_t __riscv_vrol_vx_u16mf2_tumu (vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, size_t rs1, size_t vl); +vuint16m1_t __riscv_vrol_vv_u16m1_tumu (vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl); +vuint16m1_t __riscv_vrol_vx_u16m1_tumu (vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, size_t rs1, size_t vl); +vuint16m2_t 
__riscv_vrol_vv_u16m2_tumu (vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl); +vuint16m2_t __riscv_vrol_vx_u16m2_tumu (vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, size_t rs1, size_t vl); +vuint16m4_t __riscv_vrol_vv_u16m4_tumu (vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl); +vuint16m4_t __riscv_vrol_vx_u16m4_tumu (vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, size_t rs1, size_t vl); +vuint16m8_t __riscv_vrol_vv_u16m8_tumu (vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl); +vuint16m8_t __riscv_vrol_vx_u16m8_tumu (vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, size_t rs1, size_t vl); +vuint32mf2_t __riscv_vrol_vv_u32mf2_tumu (vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl); +vuint32mf2_t __riscv_vrol_vx_u32mf2_tumu (vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, size_t rs1, size_t vl); +vuint32m1_t __riscv_vrol_vv_u32m1_tumu (vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl); +vuint32m1_t __riscv_vrol_vx_u32m1_tumu (vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, size_t rs1, size_t vl); +vuint32m2_t __riscv_vrol_vv_u32m2_tumu (vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl); +vuint32m2_t __riscv_vrol_vx_u32m2_tumu (vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, size_t rs1, size_t vl); +vuint32m4_t __riscv_vrol_vv_u32m4_tumu (vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl); +vuint32m4_t __riscv_vrol_vx_u32m4_tumu (vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, size_t rs1, size_t vl); +vuint32m8_t __riscv_vrol_vv_u32m8_tumu (vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl); +vuint32m8_t __riscv_vrol_vx_u32m8_tumu (vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, size_t rs1, size_t vl); +vuint64m1_t __riscv_vrol_vv_u64m1_tumu (vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl); +vuint64m1_t __riscv_vrol_vx_u64m1_tumu (vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, size_t rs1, size_t vl); +vuint64m2_t __riscv_vrol_vv_u64m2_tumu (vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl); +vuint64m2_t __riscv_vrol_vx_u64m2_tumu (vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, size_t rs1, size_t vl); +vuint64m4_t __riscv_vrol_vv_u64m4_tumu (vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl); +vuint64m4_t __riscv_vrol_vx_u64m4_tumu (vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, size_t rs1, size_t vl); +vuint64m8_t __riscv_vrol_vv_u64m8_tumu (vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl); +vuint64m8_t __riscv_vrol_vx_u64m8_tumu (vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, size_t rs1, size_t vl); +vuint8mf8_t __riscv_vror_vv_u8mf8_tumu (vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl); +vuint8mf8_t __riscv_vror_vx_u8mf8_tumu (vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, size_t rs1, size_t vl); +vuint8mf4_t __riscv_vror_vv_u8mf4_tumu (vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl); +vuint8mf4_t __riscv_vror_vx_u8mf4_tumu (vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, size_t rs1, size_t vl); +vuint8mf2_t __riscv_vror_vv_u8mf2_tumu (vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl); +vuint8mf2_t __riscv_vror_vx_u8mf2_tumu (vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, size_t rs1, size_t vl); +vuint8m1_t __riscv_vror_vv_u8m1_tumu (vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl); +vuint8m1_t 
__riscv_vror_vx_u8m1_tumu (vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, size_t rs1, size_t vl); +vuint8m2_t __riscv_vror_vv_u8m2_tumu (vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl); +vuint8m2_t __riscv_vror_vx_u8m2_tumu (vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, size_t rs1, size_t vl); +vuint8m4_t __riscv_vror_vv_u8m4_tumu (vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl); +vuint8m4_t __riscv_vror_vx_u8m4_tumu (vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, size_t rs1, size_t vl); +vuint8m8_t __riscv_vror_vv_u8m8_tumu (vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl); +vuint8m8_t __riscv_vror_vx_u8m8_tumu (vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, size_t rs1, size_t vl); +vuint16mf4_t __riscv_vror_vv_u16mf4_tumu (vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl); +vuint16mf4_t __riscv_vror_vx_u16mf4_tumu (vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, size_t rs1, size_t vl); +vuint16mf2_t __riscv_vror_vv_u16mf2_tumu (vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl); +vuint16mf2_t __riscv_vror_vx_u16mf2_tumu (vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, size_t rs1, size_t vl); +vuint16m1_t __riscv_vror_vv_u16m1_tumu (vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl); +vuint16m1_t __riscv_vror_vx_u16m1_tumu (vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, size_t rs1, size_t vl); +vuint16m2_t __riscv_vror_vv_u16m2_tumu (vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl); +vuint16m2_t __riscv_vror_vx_u16m2_tumu (vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, size_t rs1, size_t vl); +vuint16m4_t __riscv_vror_vv_u16m4_tumu (vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl); +vuint16m4_t __riscv_vror_vx_u16m4_tumu (vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, size_t rs1, size_t vl); +vuint16m8_t __riscv_vror_vv_u16m8_tumu (vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl); +vuint16m8_t __riscv_vror_vx_u16m8_tumu (vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, size_t rs1, size_t vl); +vuint32mf2_t __riscv_vror_vv_u32mf2_tumu (vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl); +vuint32mf2_t __riscv_vror_vx_u32mf2_tumu (vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, size_t rs1, size_t vl); +vuint32m1_t __riscv_vror_vv_u32m1_tumu (vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl); +vuint32m1_t __riscv_vror_vx_u32m1_tumu (vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, size_t rs1, size_t vl); +vuint32m2_t __riscv_vror_vv_u32m2_tumu (vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl); +vuint32m2_t __riscv_vror_vx_u32m2_tumu (vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, size_t rs1, size_t vl); +vuint32m4_t __riscv_vror_vv_u32m4_tumu (vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl); +vuint32m4_t __riscv_vror_vx_u32m4_tumu (vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, size_t rs1, size_t vl); +vuint32m8_t __riscv_vror_vv_u32m8_tumu (vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl); +vuint32m8_t __riscv_vror_vx_u32m8_tumu (vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, size_t rs1, size_t vl); +vuint64m1_t __riscv_vror_vv_u64m1_tumu (vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl); +vuint64m1_t __riscv_vror_vx_u64m1_tumu (vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, size_t rs1, size_t vl); +vuint64m2_t __riscv_vror_vv_u64m2_tumu 
(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl); +vuint64m2_t __riscv_vror_vx_u64m2_tumu (vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, size_t rs1, size_t vl); +vuint64m4_t __riscv_vror_vv_u64m4_tumu (vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl); +vuint64m4_t __riscv_vror_vx_u64m4_tumu (vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, size_t rs1, size_t vl); +vuint64m8_t __riscv_vror_vv_u64m8_tumu (vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl); +vuint64m8_t __riscv_vror_vx_u64m8_tumu (vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, size_t rs1, size_t vl); +// masked functions +vuint8mf8_t __riscv_vrol_vv_u8mf8_mu (vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl); +vuint8mf8_t __riscv_vrol_vx_u8mf8_mu (vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, size_t rs1, size_t vl); +vuint8mf4_t __riscv_vrol_vv_u8mf4_mu (vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl); +vuint8mf4_t __riscv_vrol_vx_u8mf4_mu (vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, size_t rs1, size_t vl); +vuint8mf2_t __riscv_vrol_vv_u8mf2_mu (vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl); +vuint8mf2_t __riscv_vrol_vx_u8mf2_mu (vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, size_t rs1, size_t vl); +vuint8m1_t __riscv_vrol_vv_u8m1_mu (vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl); +vuint8m1_t __riscv_vrol_vx_u8m1_mu (vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, size_t rs1, size_t vl); +vuint8m2_t __riscv_vrol_vv_u8m2_mu (vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl); +vuint8m2_t __riscv_vrol_vx_u8m2_mu (vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, size_t rs1, size_t vl); +vuint8m4_t __riscv_vrol_vv_u8m4_mu (vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl); +vuint8m4_t __riscv_vrol_vx_u8m4_mu (vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, size_t rs1, size_t vl); +vuint8m8_t __riscv_vrol_vv_u8m8_mu (vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl); +vuint8m8_t __riscv_vrol_vx_u8m8_mu (vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, size_t rs1, size_t vl); +vuint16mf4_t __riscv_vrol_vv_u16mf4_mu (vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl); +vuint16mf4_t __riscv_vrol_vx_u16mf4_mu (vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, size_t rs1, size_t vl); +vuint16mf2_t __riscv_vrol_vv_u16mf2_mu (vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl); +vuint16mf2_t __riscv_vrol_vx_u16mf2_mu (vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, size_t rs1, size_t vl); +vuint16m1_t __riscv_vrol_vv_u16m1_mu (vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl); +vuint16m1_t __riscv_vrol_vx_u16m1_mu (vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, size_t rs1, size_t vl); +vuint16m2_t __riscv_vrol_vv_u16m2_mu (vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl); +vuint16m2_t __riscv_vrol_vx_u16m2_mu (vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, size_t rs1, size_t vl); +vuint16m4_t __riscv_vrol_vv_u16m4_mu (vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl); +vuint16m4_t __riscv_vrol_vx_u16m4_mu (vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, size_t rs1, size_t vl); +vuint16m8_t __riscv_vrol_vv_u16m8_mu (vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl); +vuint16m8_t __riscv_vrol_vx_u16m8_mu (vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, size_t rs1, size_t 
vl); +vuint32mf2_t __riscv_vrol_vv_u32mf2_mu (vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl); +vuint32mf2_t __riscv_vrol_vx_u32mf2_mu (vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, size_t rs1, size_t vl); +vuint32m1_t __riscv_vrol_vv_u32m1_mu (vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl); +vuint32m1_t __riscv_vrol_vx_u32m1_mu (vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, size_t rs1, size_t vl); +vuint32m2_t __riscv_vrol_vv_u32m2_mu (vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl); +vuint32m2_t __riscv_vrol_vx_u32m2_mu (vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, size_t rs1, size_t vl); +vuint32m4_t __riscv_vrol_vv_u32m4_mu (vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl); +vuint32m4_t __riscv_vrol_vx_u32m4_mu (vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, size_t rs1, size_t vl); +vuint32m8_t __riscv_vrol_vv_u32m8_mu (vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl); +vuint32m8_t __riscv_vrol_vx_u32m8_mu (vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, size_t rs1, size_t vl); +vuint64m1_t __riscv_vrol_vv_u64m1_mu (vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl); +vuint64m1_t __riscv_vrol_vx_u64m1_mu (vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, size_t rs1, size_t vl); +vuint64m2_t __riscv_vrol_vv_u64m2_mu (vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl); +vuint64m2_t __riscv_vrol_vx_u64m2_mu (vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, size_t rs1, size_t vl); +vuint64m4_t __riscv_vrol_vv_u64m4_mu (vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl); +vuint64m4_t __riscv_vrol_vx_u64m4_mu (vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, size_t rs1, size_t vl); +vuint64m8_t __riscv_vrol_vv_u64m8_mu (vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl); +vuint64m8_t __riscv_vrol_vx_u64m8_mu (vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, size_t rs1, size_t vl); +vuint8mf8_t __riscv_vror_vv_u8mf8_mu (vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl); +vuint8mf8_t __riscv_vror_vx_u8mf8_mu (vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, size_t rs1, size_t vl); +vuint8mf4_t __riscv_vror_vv_u8mf4_mu (vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl); +vuint8mf4_t __riscv_vror_vx_u8mf4_mu (vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, size_t rs1, size_t vl); +vuint8mf2_t __riscv_vror_vv_u8mf2_mu (vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl); +vuint8mf2_t __riscv_vror_vx_u8mf2_mu (vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, size_t rs1, size_t vl); +vuint8m1_t __riscv_vror_vv_u8m1_mu (vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl); +vuint8m1_t __riscv_vror_vx_u8m1_mu (vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, size_t rs1, size_t vl); +vuint8m2_t __riscv_vror_vv_u8m2_mu (vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl); +vuint8m2_t __riscv_vror_vx_u8m2_mu (vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, size_t rs1, size_t vl); +vuint8m4_t __riscv_vror_vv_u8m4_mu (vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl); +vuint8m4_t __riscv_vror_vx_u8m4_mu (vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, size_t rs1, size_t vl); +vuint8m8_t __riscv_vror_vv_u8m8_mu (vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl); +vuint8m8_t __riscv_vror_vx_u8m8_mu (vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, size_t rs1, 
size_t vl); +vuint16mf4_t __riscv_vror_vv_u16mf4_mu (vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl); +vuint16mf4_t __riscv_vror_vx_u16mf4_mu (vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, size_t rs1, size_t vl); +vuint16mf2_t __riscv_vror_vv_u16mf2_mu (vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl); +vuint16mf2_t __riscv_vror_vx_u16mf2_mu (vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, size_t rs1, size_t vl); +vuint16m1_t __riscv_vror_vv_u16m1_mu (vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl); +vuint16m1_t __riscv_vror_vx_u16m1_mu (vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, size_t rs1, size_t vl); +vuint16m2_t __riscv_vror_vv_u16m2_mu (vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl); +vuint16m2_t __riscv_vror_vx_u16m2_mu (vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, size_t rs1, size_t vl); +vuint16m4_t __riscv_vror_vv_u16m4_mu (vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl); +vuint16m4_t __riscv_vror_vx_u16m4_mu (vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, size_t rs1, size_t vl); +vuint16m8_t __riscv_vror_vv_u16m8_mu (vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl); +vuint16m8_t __riscv_vror_vx_u16m8_mu (vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, size_t rs1, size_t vl); +vuint32mf2_t __riscv_vror_vv_u32mf2_mu (vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl); +vuint32mf2_t __riscv_vror_vx_u32mf2_mu (vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, size_t rs1, size_t vl); +vuint32m1_t __riscv_vror_vv_u32m1_mu (vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl); +vuint32m1_t __riscv_vror_vx_u32m1_mu (vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, size_t rs1, size_t vl); +vuint32m2_t __riscv_vror_vv_u32m2_mu (vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl); +vuint32m2_t __riscv_vror_vx_u32m2_mu (vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, size_t rs1, size_t vl); +vuint32m4_t __riscv_vror_vv_u32m4_mu (vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl); +vuint32m4_t __riscv_vror_vx_u32m4_mu (vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, size_t rs1, size_t vl); +vuint32m8_t __riscv_vror_vv_u32m8_mu (vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl); +vuint32m8_t __riscv_vror_vx_u32m8_mu (vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, size_t rs1, size_t vl); +vuint64m1_t __riscv_vror_vv_u64m1_mu (vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl); +vuint64m1_t __riscv_vror_vx_u64m1_mu (vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, size_t rs1, size_t vl); +vuint64m2_t __riscv_vror_vv_u64m2_mu (vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl); +vuint64m2_t __riscv_vror_vx_u64m2_mu (vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, size_t rs1, size_t vl); +vuint64m4_t __riscv_vror_vv_u64m4_mu (vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl); +vuint64m4_t __riscv_vror_vx_u64m4_mu (vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, size_t rs1, size_t vl); +vuint64m8_t __riscv_vror_vv_u64m8_mu (vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl); +vuint64m8_t __riscv_vror_vx_u64m8_mu (vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, size_t rs1, size_t vl); +---- + +[[policy-variant-]] +==== Vector Basic Bit-manipulation used - Widening Shift + +[,c] +---- +vuint16mf4_t __riscv_vwsll_vv_u16mf4_tu 
(vuint16mf4_t vd, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl); +vuint16mf4_t __riscv_vwsll_vx_u16mf4_tu (vuint16mf4_t vd, vuint8mf8_t vs2, size_t rs1, size_t vl); +vuint16mf2_t __riscv_vwsll_vv_u16mf2_tu (vuint16mf2_t vd, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl); +vuint16mf2_t __riscv_vwsll_vx_u16mf2_tu (vuint16mf2_t vd, vuint8mf4_t vs2, size_t rs1, size_t vl); +vuint16m1_t __riscv_vwsll_vv_u16m1_tu (vuint16m1_t vd, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl); +vuint16m1_t __riscv_vwsll_vx_u16m1_tu (vuint16m1_t vd, vuint8mf2_t vs2, size_t rs1, size_t vl); +vuint16m2_t __riscv_vwsll_vv_u16m2_tu (vuint16m2_t vd, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl); +vuint16m2_t __riscv_vwsll_vx_u16m2_tu (vuint16m2_t vd, vuint8m1_t vs2, size_t rs1, size_t vl); +vuint16m4_t __riscv_vwsll_vv_u16m4_tu (vuint16m4_t vd, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl); +vuint16m4_t __riscv_vwsll_vx_u16m4_tu (vuint16m4_t vd, vuint8m2_t vs2, size_t rs1, size_t vl); +vuint16m8_t __riscv_vwsll_vv_u16m8_tu (vuint16m8_t vd, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl); +vuint16m8_t __riscv_vwsll_vx_u16m8_tu (vuint16m8_t vd, vuint8m4_t vs2, size_t rs1, size_t vl); +vuint32mf2_t __riscv_vwsll_vv_u32mf2_tu (vuint32mf2_t vd, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl); +vuint32mf2_t __riscv_vwsll_vx_u32mf2_tu (vuint32mf2_t vd, vuint16mf4_t vs2, size_t rs1, size_t vl); +vuint32m1_t __riscv_vwsll_vv_u32m1_tu (vuint32m1_t vd, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl); +vuint32m1_t __riscv_vwsll_vx_u32m1_tu (vuint32m1_t vd, vuint16mf2_t vs2, size_t rs1, size_t vl); +vuint32m2_t __riscv_vwsll_vv_u32m2_tu (vuint32m2_t vd, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl); +vuint32m2_t __riscv_vwsll_vx_u32m2_tu (vuint32m2_t vd, vuint16m1_t vs2, size_t rs1, size_t vl); +vuint32m4_t __riscv_vwsll_vv_u32m4_tu (vuint32m4_t vd, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl); +vuint32m4_t __riscv_vwsll_vx_u32m4_tu (vuint32m4_t vd, vuint16m2_t vs2, size_t rs1, size_t vl); +vuint32m8_t __riscv_vwsll_vv_u32m8_tu (vuint32m8_t vd, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl); +vuint32m8_t __riscv_vwsll_vx_u32m8_tu (vuint32m8_t vd, vuint16m4_t vs2, size_t rs1, size_t vl); +vuint64m1_t __riscv_vwsll_vv_u64m1_tu (vuint64m1_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl); +vuint64m1_t __riscv_vwsll_vx_u64m1_tu (vuint64m1_t vd, vuint32mf2_t vs2, size_t rs1, size_t vl); +vuint64m2_t __riscv_vwsll_vv_u64m2_tu (vuint64m2_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl); +vuint64m2_t __riscv_vwsll_vx_u64m2_tu (vuint64m2_t vd, vuint32m1_t vs2, size_t rs1, size_t vl); +vuint64m4_t __riscv_vwsll_vv_u64m4_tu (vuint64m4_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl); +vuint64m4_t __riscv_vwsll_vx_u64m4_tu (vuint64m4_t vd, vuint32m2_t vs2, size_t rs1, size_t vl); +vuint64m8_t __riscv_vwsll_vv_u64m8_tu (vuint64m8_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl); +vuint64m8_t __riscv_vwsll_vx_u64m8_tu (vuint64m8_t vd, vuint32m4_t vs2, size_t rs1, size_t vl); +// masked functions +vuint16mf4_t __riscv_vwsll_vv_u16mf4_tum (vbool64_t vm, vuint16mf4_t vd, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl); +vuint16mf4_t __riscv_vwsll_vx_u16mf4_tum (vbool64_t vm, vuint16mf4_t vd, vuint8mf8_t vs2, size_t rs1, size_t vl); +vuint16mf2_t __riscv_vwsll_vv_u16mf2_tum (vbool32_t vm, vuint16mf2_t vd, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl); +vuint16mf2_t __riscv_vwsll_vx_u16mf2_tum (vbool32_t vm, vuint16mf2_t vd, vuint8mf4_t vs2, size_t rs1, size_t vl); +vuint16m1_t __riscv_vwsll_vv_u16m1_tum (vbool16_t vm, vuint16m1_t vd, vuint8mf2_t vs2, vuint8mf2_t 
vs1, size_t vl); +vuint16m1_t __riscv_vwsll_vx_u16m1_tum (vbool16_t vm, vuint16m1_t vd, vuint8mf2_t vs2, size_t rs1, size_t vl); +vuint16m2_t __riscv_vwsll_vv_u16m2_tum (vbool8_t vm, vuint16m2_t vd, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl); +vuint16m2_t __riscv_vwsll_vx_u16m2_tum (vbool8_t vm, vuint16m2_t vd, vuint8m1_t vs2, size_t rs1, size_t vl); +vuint16m4_t __riscv_vwsll_vv_u16m4_tum (vbool4_t vm, vuint16m4_t vd, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl); +vuint16m4_t __riscv_vwsll_vx_u16m4_tum (vbool4_t vm, vuint16m4_t vd, vuint8m2_t vs2, size_t rs1, size_t vl); +vuint16m8_t __riscv_vwsll_vv_u16m8_tum (vbool2_t vm, vuint16m8_t vd, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl); +vuint16m8_t __riscv_vwsll_vx_u16m8_tum (vbool2_t vm, vuint16m8_t vd, vuint8m4_t vs2, size_t rs1, size_t vl); +vuint32mf2_t __riscv_vwsll_vv_u32mf2_tum (vbool64_t vm, vuint32mf2_t vd, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl); +vuint32mf2_t __riscv_vwsll_vx_u32mf2_tum (vbool64_t vm, vuint32mf2_t vd, vuint16mf4_t vs2, size_t rs1, size_t vl); +vuint32m1_t __riscv_vwsll_vv_u32m1_tum (vbool32_t vm, vuint32m1_t vd, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl); +vuint32m1_t __riscv_vwsll_vx_u32m1_tum (vbool32_t vm, vuint32m1_t vd, vuint16mf2_t vs2, size_t rs1, size_t vl); +vuint32m2_t __riscv_vwsll_vv_u32m2_tum (vbool16_t vm, vuint32m2_t vd, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl); +vuint32m2_t __riscv_vwsll_vx_u32m2_tum (vbool16_t vm, vuint32m2_t vd, vuint16m1_t vs2, size_t rs1, size_t vl); +vuint32m4_t __riscv_vwsll_vv_u32m4_tum (vbool8_t vm, vuint32m4_t vd, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl); +vuint32m4_t __riscv_vwsll_vx_u32m4_tum (vbool8_t vm, vuint32m4_t vd, vuint16m2_t vs2, size_t rs1, size_t vl); +vuint32m8_t __riscv_vwsll_vv_u32m8_tum (vbool4_t vm, vuint32m8_t vd, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl); +vuint32m8_t __riscv_vwsll_vx_u32m8_tum (vbool4_t vm, vuint32m8_t vd, vuint16m4_t vs2, size_t rs1, size_t vl); +vuint64m1_t __riscv_vwsll_vv_u64m1_tum (vbool64_t vm, vuint64m1_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl); +vuint64m1_t __riscv_vwsll_vx_u64m1_tum (vbool64_t vm, vuint64m1_t vd, vuint32mf2_t vs2, size_t rs1, size_t vl); +vuint64m2_t __riscv_vwsll_vv_u64m2_tum (vbool32_t vm, vuint64m2_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl); +vuint64m2_t __riscv_vwsll_vx_u64m2_tum (vbool32_t vm, vuint64m2_t vd, vuint32m1_t vs2, size_t rs1, size_t vl); +vuint64m4_t __riscv_vwsll_vv_u64m4_tum (vbool16_t vm, vuint64m4_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl); +vuint64m4_t __riscv_vwsll_vx_u64m4_tum (vbool16_t vm, vuint64m4_t vd, vuint32m2_t vs2, size_t rs1, size_t vl); +vuint64m8_t __riscv_vwsll_vv_u64m8_tum (vbool8_t vm, vuint64m8_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl); +vuint64m8_t __riscv_vwsll_vx_u64m8_tum (vbool8_t vm, vuint64m8_t vd, vuint32m4_t vs2, size_t rs1, size_t vl); +// masked functions +vuint16mf4_t __riscv_vwsll_vv_u16mf4_tumu (vbool64_t vm, vuint16mf4_t vd, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl); +vuint16mf4_t __riscv_vwsll_vx_u16mf4_tumu (vbool64_t vm, vuint16mf4_t vd, vuint8mf8_t vs2, size_t rs1, size_t vl); +vuint16mf2_t __riscv_vwsll_vv_u16mf2_tumu (vbool32_t vm, vuint16mf2_t vd, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl); +vuint16mf2_t __riscv_vwsll_vx_u16mf2_tumu (vbool32_t vm, vuint16mf2_t vd, vuint8mf4_t vs2, size_t rs1, size_t vl); +vuint16m1_t __riscv_vwsll_vv_u16m1_tumu (vbool16_t vm, vuint16m1_t vd, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl); +vuint16m1_t __riscv_vwsll_vx_u16m1_tumu (vbool16_t vm, vuint16m1_t 
vd, vuint8mf2_t vs2, size_t rs1, size_t vl); +vuint16m2_t __riscv_vwsll_vv_u16m2_tumu (vbool8_t vm, vuint16m2_t vd, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl); +vuint16m2_t __riscv_vwsll_vx_u16m2_tumu (vbool8_t vm, vuint16m2_t vd, vuint8m1_t vs2, size_t rs1, size_t vl); +vuint16m4_t __riscv_vwsll_vv_u16m4_tumu (vbool4_t vm, vuint16m4_t vd, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl); +vuint16m4_t __riscv_vwsll_vx_u16m4_tumu (vbool4_t vm, vuint16m4_t vd, vuint8m2_t vs2, size_t rs1, size_t vl); +vuint16m8_t __riscv_vwsll_vv_u16m8_tumu (vbool2_t vm, vuint16m8_t vd, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl); +vuint16m8_t __riscv_vwsll_vx_u16m8_tumu (vbool2_t vm, vuint16m8_t vd, vuint8m4_t vs2, size_t rs1, size_t vl); +vuint32mf2_t __riscv_vwsll_vv_u32mf2_tumu (vbool64_t vm, vuint32mf2_t vd, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl); +vuint32mf2_t __riscv_vwsll_vx_u32mf2_tumu (vbool64_t vm, vuint32mf2_t vd, vuint16mf4_t vs2, size_t rs1, size_t vl); +vuint32m1_t __riscv_vwsll_vv_u32m1_tumu (vbool32_t vm, vuint32m1_t vd, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl); +vuint32m1_t __riscv_vwsll_vx_u32m1_tumu (vbool32_t vm, vuint32m1_t vd, vuint16mf2_t vs2, size_t rs1, size_t vl); +vuint32m2_t __riscv_vwsll_vv_u32m2_tumu (vbool16_t vm, vuint32m2_t vd, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl); +vuint32m2_t __riscv_vwsll_vx_u32m2_tumu (vbool16_t vm, vuint32m2_t vd, vuint16m1_t vs2, size_t rs1, size_t vl); +vuint32m4_t __riscv_vwsll_vv_u32m4_tumu (vbool8_t vm, vuint32m4_t vd, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl); +vuint32m4_t __riscv_vwsll_vx_u32m4_tumu (vbool8_t vm, vuint32m4_t vd, vuint16m2_t vs2, size_t rs1, size_t vl); +vuint32m8_t __riscv_vwsll_vv_u32m8_tumu (vbool4_t vm, vuint32m8_t vd, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl); +vuint32m8_t __riscv_vwsll_vx_u32m8_tumu (vbool4_t vm, vuint32m8_t vd, vuint16m4_t vs2, size_t rs1, size_t vl); +vuint64m1_t __riscv_vwsll_vv_u64m1_tumu (vbool64_t vm, vuint64m1_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl); +vuint64m1_t __riscv_vwsll_vx_u64m1_tumu (vbool64_t vm, vuint64m1_t vd, vuint32mf2_t vs2, size_t rs1, size_t vl); +vuint64m2_t __riscv_vwsll_vv_u64m2_tumu (vbool32_t vm, vuint64m2_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl); +vuint64m2_t __riscv_vwsll_vx_u64m2_tumu (vbool32_t vm, vuint64m2_t vd, vuint32m1_t vs2, size_t rs1, size_t vl); +vuint64m4_t __riscv_vwsll_vv_u64m4_tumu (vbool16_t vm, vuint64m4_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl); +vuint64m4_t __riscv_vwsll_vx_u64m4_tumu (vbool16_t vm, vuint64m4_t vd, vuint32m2_t vs2, size_t rs1, size_t vl); +vuint64m8_t __riscv_vwsll_vv_u64m8_tumu (vbool8_t vm, vuint64m8_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl); +vuint64m8_t __riscv_vwsll_vx_u64m8_tumu (vbool8_t vm, vuint64m8_t vd, vuint32m4_t vs2, size_t rs1, size_t vl); +// masked functions +vuint16mf4_t __riscv_vwsll_vv_u16mf4_mu (vbool64_t vm, vuint16mf4_t vd, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl); +vuint16mf4_t __riscv_vwsll_vx_u16mf4_mu (vbool64_t vm, vuint16mf4_t vd, vuint8mf8_t vs2, size_t rs1, size_t vl); +vuint16mf2_t __riscv_vwsll_vv_u16mf2_mu (vbool32_t vm, vuint16mf2_t vd, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl); +vuint16mf2_t __riscv_vwsll_vx_u16mf2_mu (vbool32_t vm, vuint16mf2_t vd, vuint8mf4_t vs2, size_t rs1, size_t vl); +vuint16m1_t __riscv_vwsll_vv_u16m1_mu (vbool16_t vm, vuint16m1_t vd, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl); +vuint16m1_t __riscv_vwsll_vx_u16m1_mu (vbool16_t vm, vuint16m1_t vd, vuint8mf2_t vs2, size_t rs1, size_t vl); +vuint16m2_t 
__riscv_vwsll_vv_u16m2_mu (vbool8_t vm, vuint16m2_t vd, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl); +vuint16m2_t __riscv_vwsll_vx_u16m2_mu (vbool8_t vm, vuint16m2_t vd, vuint8m1_t vs2, size_t rs1, size_t vl); +vuint16m4_t __riscv_vwsll_vv_u16m4_mu (vbool4_t vm, vuint16m4_t vd, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl); +vuint16m4_t __riscv_vwsll_vx_u16m4_mu (vbool4_t vm, vuint16m4_t vd, vuint8m2_t vs2, size_t rs1, size_t vl); +vuint16m8_t __riscv_vwsll_vv_u16m8_mu (vbool2_t vm, vuint16m8_t vd, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl); +vuint16m8_t __riscv_vwsll_vx_u16m8_mu (vbool2_t vm, vuint16m8_t vd, vuint8m4_t vs2, size_t rs1, size_t vl); +vuint32mf2_t __riscv_vwsll_vv_u32mf2_mu (vbool64_t vm, vuint32mf2_t vd, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl); +vuint32mf2_t __riscv_vwsll_vx_u32mf2_mu (vbool64_t vm, vuint32mf2_t vd, vuint16mf4_t vs2, size_t rs1, size_t vl); +vuint32m1_t __riscv_vwsll_vv_u32m1_mu (vbool32_t vm, vuint32m1_t vd, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl); +vuint32m1_t __riscv_vwsll_vx_u32m1_mu (vbool32_t vm, vuint32m1_t vd, vuint16mf2_t vs2, size_t rs1, size_t vl); +vuint32m2_t __riscv_vwsll_vv_u32m2_mu (vbool16_t vm, vuint32m2_t vd, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl); +vuint32m2_t __riscv_vwsll_vx_u32m2_mu (vbool16_t vm, vuint32m2_t vd, vuint16m1_t vs2, size_t rs1, size_t vl); +vuint32m4_t __riscv_vwsll_vv_u32m4_mu (vbool8_t vm, vuint32m4_t vd, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl); +vuint32m4_t __riscv_vwsll_vx_u32m4_mu (vbool8_t vm, vuint32m4_t vd, vuint16m2_t vs2, size_t rs1, size_t vl); +vuint32m8_t __riscv_vwsll_vv_u32m8_mu (vbool4_t vm, vuint32m8_t vd, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl); +vuint32m8_t __riscv_vwsll_vx_u32m8_mu (vbool4_t vm, vuint32m8_t vd, vuint16m4_t vs2, size_t rs1, size_t vl); +vuint64m1_t __riscv_vwsll_vv_u64m1_mu (vbool64_t vm, vuint64m1_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl); +vuint64m1_t __riscv_vwsll_vx_u64m1_mu (vbool64_t vm, vuint64m1_t vd, vuint32mf2_t vs2, size_t rs1, size_t vl); +vuint64m2_t __riscv_vwsll_vv_u64m2_mu (vbool32_t vm, vuint64m2_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl); +vuint64m2_t __riscv_vwsll_vx_u64m2_mu (vbool32_t vm, vuint64m2_t vd, vuint32m1_t vs2, size_t rs1, size_t vl); +vuint64m4_t __riscv_vwsll_vv_u64m4_mu (vbool16_t vm, vuint64m4_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl); +vuint64m4_t __riscv_vwsll_vx_u64m4_mu (vbool16_t vm, vuint64m4_t vd, vuint32m2_t vs2, size_t rs1, size_t vl); +vuint64m8_t __riscv_vwsll_vv_u64m8_mu (vbool8_t vm, vuint64m8_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl); +vuint64m8_t __riscv_vwsll_vx_u64m8_mu (vbool8_t vm, vuint64m8_t vd, vuint32m4_t vs2, size_t rs1, size_t vl); +---- diff --git a/auto-generated/vector-crypto/policy_funcs/intrinsic_funcs/01_zvbc_-_vector_carryless_multiplication.adoc b/auto-generated/vector-crypto/policy_funcs/intrinsic_funcs/01_zvbc_-_vector_carryless_multiplication.adoc new file mode 100644 index 000000000..559ba54e5 --- /dev/null +++ b/auto-generated/vector-crypto/policy_funcs/intrinsic_funcs/01_zvbc_-_vector_carryless_multiplication.adoc @@ -0,0 +1,76 @@ + +=== Zvbc - Vector Carryless Multiplication + +[[policy-variant-]] +==== Vector Carryless Multiplication + +[,c] +---- +vuint64m1_t __riscv_vclmul_vv_u64m1_tu (vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl); +vuint64m1_t __riscv_vclmul_vx_u64m1_tu (vuint64m1_t vd, vuint64m1_t vs2, uint64_t rs1, size_t vl); +vuint64m2_t __riscv_vclmul_vv_u64m2_tu (vuint64m2_t vd, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl); 
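+// Reader note (explanatory comment, not generated output): the policy suffixes in these
+// prototypes follow the standard RVV intrinsic policy scheme:
+//   _tu   - unmasked, tail-undisturbed: tail elements keep the previous value of vd
+//   _tum  - masked, tail-undisturbed: tail elements keep vd; inactive elements are agnostic
+//   _tumu - masked, tail-undisturbed and mask-undisturbed: tail and inactive elements keep vd
+//   _mu   - masked, mask-undisturbed: inactive elements keep vd; tail elements are agnostic
+// Operand names mirror the assembly form: vd is the destination/passthrough register group,
+// vs2/vs1 are vector sources, rs1 is a scalar operand, uimm is a constant immediate,
+// vm is the mask, and vl is the active vector length.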
+vuint64m2_t __riscv_vclmul_vx_u64m2_tu (vuint64m2_t vd, vuint64m2_t vs2, uint64_t rs1, size_t vl); +vuint64m4_t __riscv_vclmul_vv_u64m4_tu (vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl); +vuint64m4_t __riscv_vclmul_vx_u64m4_tu (vuint64m4_t vd, vuint64m4_t vs2, uint64_t rs1, size_t vl); +vuint64m8_t __riscv_vclmul_vv_u64m8_tu (vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl); +vuint64m8_t __riscv_vclmul_vx_u64m8_tu (vuint64m8_t vd, vuint64m8_t vs2, uint64_t rs1, size_t vl); +vuint64m1_t __riscv_vclmulh_vv_u64m1_tu (vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl); +vuint64m1_t __riscv_vclmulh_vx_u64m1_tu (vuint64m1_t vd, vuint64m1_t vs2, uint64_t rs1, size_t vl); +vuint64m2_t __riscv_vclmulh_vv_u64m2_tu (vuint64m2_t vd, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl); +vuint64m2_t __riscv_vclmulh_vx_u64m2_tu (vuint64m2_t vd, vuint64m2_t vs2, uint64_t rs1, size_t vl); +vuint64m4_t __riscv_vclmulh_vv_u64m4_tu (vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl); +vuint64m4_t __riscv_vclmulh_vx_u64m4_tu (vuint64m4_t vd, vuint64m4_t vs2, uint64_t rs1, size_t vl); +vuint64m8_t __riscv_vclmulh_vv_u64m8_tu (vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl); +vuint64m8_t __riscv_vclmulh_vx_u64m8_tu (vuint64m8_t vd, vuint64m8_t vs2, uint64_t rs1, size_t vl); +// masked functions +vuint64m1_t __riscv_vclmul_vv_u64m1_tum (vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl); +vuint64m1_t __riscv_vclmul_vx_u64m1_tum (vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, uint64_t rs1, size_t vl); +vuint64m2_t __riscv_vclmul_vv_u64m2_tum (vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl); +vuint64m2_t __riscv_vclmul_vx_u64m2_tum (vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, uint64_t rs1, size_t vl); +vuint64m4_t __riscv_vclmul_vv_u64m4_tum (vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl); +vuint64m4_t __riscv_vclmul_vx_u64m4_tum (vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, uint64_t rs1, size_t vl); +vuint64m8_t __riscv_vclmul_vv_u64m8_tum (vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl); +vuint64m8_t __riscv_vclmul_vx_u64m8_tum (vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, uint64_t rs1, size_t vl); +vuint64m1_t __riscv_vclmulh_vv_u64m1_tum (vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl); +vuint64m1_t __riscv_vclmulh_vx_u64m1_tum (vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, uint64_t rs1, size_t vl); +vuint64m2_t __riscv_vclmulh_vv_u64m2_tum (vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl); +vuint64m2_t __riscv_vclmulh_vx_u64m2_tum (vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, uint64_t rs1, size_t vl); +vuint64m4_t __riscv_vclmulh_vv_u64m4_tum (vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl); +vuint64m4_t __riscv_vclmulh_vx_u64m4_tum (vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, uint64_t rs1, size_t vl); +vuint64m8_t __riscv_vclmulh_vv_u64m8_tum (vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl); +vuint64m8_t __riscv_vclmulh_vx_u64m8_tum (vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, uint64_t rs1, size_t vl); +// masked functions +vuint64m1_t __riscv_vclmul_vv_u64m1_tumu (vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl); +vuint64m1_t __riscv_vclmul_vx_u64m1_tumu (vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, uint64_t rs1, size_t vl); +vuint64m2_t __riscv_vclmul_vv_u64m2_tumu (vbool32_t vm, vuint64m2_t vd, 
vuint64m2_t vs2, vuint64m2_t vs1, size_t vl); +vuint64m2_t __riscv_vclmul_vx_u64m2_tumu (vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, uint64_t rs1, size_t vl); +vuint64m4_t __riscv_vclmul_vv_u64m4_tumu (vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl); +vuint64m4_t __riscv_vclmul_vx_u64m4_tumu (vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, uint64_t rs1, size_t vl); +vuint64m8_t __riscv_vclmul_vv_u64m8_tumu (vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl); +vuint64m8_t __riscv_vclmul_vx_u64m8_tumu (vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, uint64_t rs1, size_t vl); +vuint64m1_t __riscv_vclmulh_vv_u64m1_tumu (vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl); +vuint64m1_t __riscv_vclmulh_vx_u64m1_tumu (vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, uint64_t rs1, size_t vl); +vuint64m2_t __riscv_vclmulh_vv_u64m2_tumu (vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl); +vuint64m2_t __riscv_vclmulh_vx_u64m2_tumu (vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, uint64_t rs1, size_t vl); +vuint64m4_t __riscv_vclmulh_vv_u64m4_tumu (vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl); +vuint64m4_t __riscv_vclmulh_vx_u64m4_tumu (vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, uint64_t rs1, size_t vl); +vuint64m8_t __riscv_vclmulh_vv_u64m8_tumu (vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl); +vuint64m8_t __riscv_vclmulh_vx_u64m8_tumu (vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, uint64_t rs1, size_t vl); +// masked functions +vuint64m1_t __riscv_vclmul_vv_u64m1_mu (vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl); +vuint64m1_t __riscv_vclmul_vx_u64m1_mu (vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, uint64_t rs1, size_t vl); +vuint64m2_t __riscv_vclmul_vv_u64m2_mu (vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl); +vuint64m2_t __riscv_vclmul_vx_u64m2_mu (vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, uint64_t rs1, size_t vl); +vuint64m4_t __riscv_vclmul_vv_u64m4_mu (vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl); +vuint64m4_t __riscv_vclmul_vx_u64m4_mu (vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, uint64_t rs1, size_t vl); +vuint64m8_t __riscv_vclmul_vv_u64m8_mu (vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl); +vuint64m8_t __riscv_vclmul_vx_u64m8_mu (vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, uint64_t rs1, size_t vl); +vuint64m1_t __riscv_vclmulh_vv_u64m1_mu (vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl); +vuint64m1_t __riscv_vclmulh_vx_u64m1_mu (vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, uint64_t rs1, size_t vl); +vuint64m2_t __riscv_vclmulh_vv_u64m2_mu (vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl); +vuint64m2_t __riscv_vclmulh_vx_u64m2_mu (vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, uint64_t rs1, size_t vl); +vuint64m4_t __riscv_vclmulh_vv_u64m4_mu (vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl); +vuint64m4_t __riscv_vclmulh_vx_u64m4_mu (vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, uint64_t rs1, size_t vl); +vuint64m8_t __riscv_vclmulh_vv_u64m8_mu (vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl); +vuint64m8_t __riscv_vclmulh_vx_u64m8_mu (vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, uint64_t rs1, size_t vl); +---- diff --git 
a/auto-generated/vector-crypto/policy_funcs/intrinsic_funcs/02_zvkg_-_vector_gcm_gmac.adoc b/auto-generated/vector-crypto/policy_funcs/intrinsic_funcs/02_zvkg_-_vector_gcm_gmac.adoc new file mode 100644 index 000000000..cf2c6a401 --- /dev/null +++ b/auto-generated/vector-crypto/policy_funcs/intrinsic_funcs/02_zvkg_-_vector_gcm_gmac.adoc @@ -0,0 +1,19 @@ + +=== Zvkg - Vector GCM/GMAC + +[[policy-variant-]] +==== Vector GCM/GMAC + +[,c] +---- +vuint32mf2_t __riscv_vghsh_vv_u32mf2_tu (vuint32mf2_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl); +vuint32m1_t __riscv_vghsh_vv_u32m1_tu (vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl); +vuint32m2_t __riscv_vghsh_vv_u32m2_tu (vuint32m2_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl); +vuint32m4_t __riscv_vghsh_vv_u32m4_tu (vuint32m4_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl); +vuint32m8_t __riscv_vghsh_vv_u32m8_tu (vuint32m8_t vd, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl); +vuint32mf2_t __riscv_vgmul_vv_u32mf2_tu (vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vgmul_vv_u32m1_tu (vuint32m1_t vd, vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vgmul_vv_u32m2_tu (vuint32m2_t vd, vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vgmul_vv_u32m4_tu (vuint32m4_t vd, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vgmul_vv_u32m8_tu (vuint32m8_t vd, vuint32m8_t vs2, size_t vl); +---- diff --git a/auto-generated/vector-crypto/policy_funcs/intrinsic_funcs/03_zvkned_-_nist_suite:_vector_aes_block_cipher.adoc b/auto-generated/vector-crypto/policy_funcs/intrinsic_funcs/03_zvkned_-_nist_suite:_vector_aes_block_cipher.adoc new file mode 100644 index 000000000..29d2463a1 --- /dev/null +++ b/auto-generated/vector-crypto/policy_funcs/intrinsic_funcs/03_zvkned_-_nist_suite:_vector_aes_block_cipher.adoc @@ -0,0 +1,130 @@ + +=== Zvkned - NIST Suite: Vector AES Block Cipher + +[[policy-variant-]] +==== Vector AES Encryption + +[,c] +---- +vuint32mf2_t __riscv_vaesef_vv_u32mf2_tu (vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); +vuint32mf2_t __riscv_vaesef_vs_u32mf2_u32mf2_tu (vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vaesef_vs_u32mf2_u32m1_tu (vuint32m1_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m2_t __riscv_vaesef_vs_u32mf2_u32m2_tu (vuint32m2_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m4_t __riscv_vaesef_vs_u32mf2_u32m4_tu (vuint32m4_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m8_t __riscv_vaesef_vs_u32mf2_u32m8_tu (vuint32m8_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vaesef_vv_u32m1_tu (vuint32m1_t vd, vuint32m1_t vs2, size_t vl); +vuint32m1_t __riscv_vaesef_vs_u32m1_u32m1_tu (vuint32m1_t vd, vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vaesef_vs_u32m1_u32m2_tu (vuint32m2_t vd, vuint32m1_t vs2, size_t vl); +vuint32m4_t __riscv_vaesef_vs_u32m1_u32m4_tu (vuint32m4_t vd, vuint32m1_t vs2, size_t vl); +vuint32m8_t __riscv_vaesef_vs_u32m1_u32m8_tu (vuint32m8_t vd, vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vaesef_vv_u32m2_tu (vuint32m2_t vd, vuint32m2_t vs2, size_t vl); +vuint32m2_t __riscv_vaesef_vs_u32m2_u32m2_tu (vuint32m2_t vd, vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vaesef_vs_u32m2_u32m4_tu (vuint32m4_t vd, vuint32m2_t vs2, size_t vl); +vuint32m8_t __riscv_vaesef_vs_u32m2_u32m8_tu (vuint32m8_t vd, vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vaesef_vv_u32m4_tu (vuint32m4_t vd, vuint32m4_t vs2, size_t vl); +vuint32m4_t __riscv_vaesef_vs_u32m4_u32m4_tu (vuint32m4_t vd, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vaesef_vs_u32m4_u32m8_tu 
(vuint32m8_t vd, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vaesef_vv_u32m8_tu (vuint32m8_t vd, vuint32m8_t vs2, size_t vl); +vuint32mf2_t __riscv_vaesem_vv_u32mf2_tu (vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); +vuint32mf2_t __riscv_vaesem_vs_u32mf2_u32mf2_tu (vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vaesem_vs_u32mf2_u32m1_tu (vuint32m1_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m2_t __riscv_vaesem_vs_u32mf2_u32m2_tu (vuint32m2_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m4_t __riscv_vaesem_vs_u32mf2_u32m4_tu (vuint32m4_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m8_t __riscv_vaesem_vs_u32mf2_u32m8_tu (vuint32m8_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vaesem_vv_u32m1_tu (vuint32m1_t vd, vuint32m1_t vs2, size_t vl); +vuint32m1_t __riscv_vaesem_vs_u32m1_u32m1_tu (vuint32m1_t vd, vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vaesem_vs_u32m1_u32m2_tu (vuint32m2_t vd, vuint32m1_t vs2, size_t vl); +vuint32m4_t __riscv_vaesem_vs_u32m1_u32m4_tu (vuint32m4_t vd, vuint32m1_t vs2, size_t vl); +vuint32m8_t __riscv_vaesem_vs_u32m1_u32m8_tu (vuint32m8_t vd, vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vaesem_vv_u32m2_tu (vuint32m2_t vd, vuint32m2_t vs2, size_t vl); +vuint32m2_t __riscv_vaesem_vs_u32m2_u32m2_tu (vuint32m2_t vd, vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vaesem_vs_u32m2_u32m4_tu (vuint32m4_t vd, vuint32m2_t vs2, size_t vl); +vuint32m8_t __riscv_vaesem_vs_u32m2_u32m8_tu (vuint32m8_t vd, vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vaesem_vv_u32m4_tu (vuint32m4_t vd, vuint32m4_t vs2, size_t vl); +vuint32m4_t __riscv_vaesem_vs_u32m4_u32m4_tu (vuint32m4_t vd, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vaesem_vs_u32m4_u32m8_tu (vuint32m8_t vd, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vaesem_vv_u32m8_tu (vuint32m8_t vd, vuint32m8_t vs2, size_t vl); +---- + +[[policy-variant-]] +==== Vector AES Decryption + +[,c] +---- +vuint32mf2_t __riscv_vaesdf_vv_u32mf2_tu (vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); +vuint32mf2_t __riscv_vaesdf_vs_u32mf2_u32mf2_tu (vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vaesdf_vs_u32mf2_u32m1_tu (vuint32m1_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m2_t __riscv_vaesdf_vs_u32mf2_u32m2_tu (vuint32m2_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m4_t __riscv_vaesdf_vs_u32mf2_u32m4_tu (vuint32m4_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m8_t __riscv_vaesdf_vs_u32mf2_u32m8_tu (vuint32m8_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vaesdf_vv_u32m1_tu (vuint32m1_t vd, vuint32m1_t vs2, size_t vl); +vuint32m1_t __riscv_vaesdf_vs_u32m1_u32m1_tu (vuint32m1_t vd, vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vaesdf_vs_u32m1_u32m2_tu (vuint32m2_t vd, vuint32m1_t vs2, size_t vl); +vuint32m4_t __riscv_vaesdf_vs_u32m1_u32m4_tu (vuint32m4_t vd, vuint32m1_t vs2, size_t vl); +vuint32m8_t __riscv_vaesdf_vs_u32m1_u32m8_tu (vuint32m8_t vd, vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vaesdf_vv_u32m2_tu (vuint32m2_t vd, vuint32m2_t vs2, size_t vl); +vuint32m2_t __riscv_vaesdf_vs_u32m2_u32m2_tu (vuint32m2_t vd, vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vaesdf_vs_u32m2_u32m4_tu (vuint32m4_t vd, vuint32m2_t vs2, size_t vl); +vuint32m8_t __riscv_vaesdf_vs_u32m2_u32m8_tu (vuint32m8_t vd, vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vaesdf_vv_u32m4_tu (vuint32m4_t vd, vuint32m4_t vs2, size_t vl); +vuint32m4_t __riscv_vaesdf_vs_u32m4_u32m4_tu (vuint32m4_t vd, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vaesdf_vs_u32m4_u32m8_tu (vuint32m8_t 
vd, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vaesdf_vv_u32m8_tu (vuint32m8_t vd, vuint32m8_t vs2, size_t vl); +vuint32mf2_t __riscv_vaesdm_vv_u32mf2_tu (vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); +vuint32mf2_t __riscv_vaesdm_vs_u32mf2_u32mf2_tu (vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vaesdm_vs_u32mf2_u32m1_tu (vuint32m1_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m2_t __riscv_vaesdm_vs_u32mf2_u32m2_tu (vuint32m2_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m4_t __riscv_vaesdm_vs_u32mf2_u32m4_tu (vuint32m4_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m8_t __riscv_vaesdm_vs_u32mf2_u32m8_tu (vuint32m8_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vaesdm_vv_u32m1_tu (vuint32m1_t vd, vuint32m1_t vs2, size_t vl); +vuint32m1_t __riscv_vaesdm_vs_u32m1_u32m1_tu (vuint32m1_t vd, vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vaesdm_vs_u32m1_u32m2_tu (vuint32m2_t vd, vuint32m1_t vs2, size_t vl); +vuint32m4_t __riscv_vaesdm_vs_u32m1_u32m4_tu (vuint32m4_t vd, vuint32m1_t vs2, size_t vl); +vuint32m8_t __riscv_vaesdm_vs_u32m1_u32m8_tu (vuint32m8_t vd, vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vaesdm_vv_u32m2_tu (vuint32m2_t vd, vuint32m2_t vs2, size_t vl); +vuint32m2_t __riscv_vaesdm_vs_u32m2_u32m2_tu (vuint32m2_t vd, vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vaesdm_vs_u32m2_u32m4_tu (vuint32m4_t vd, vuint32m2_t vs2, size_t vl); +vuint32m8_t __riscv_vaesdm_vs_u32m2_u32m8_tu (vuint32m8_t vd, vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vaesdm_vv_u32m4_tu (vuint32m4_t vd, vuint32m4_t vs2, size_t vl); +vuint32m4_t __riscv_vaesdm_vs_u32m4_u32m4_tu (vuint32m4_t vd, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vaesdm_vs_u32m4_u32m8_tu (vuint32m8_t vd, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vaesdm_vv_u32m8_tu (vuint32m8_t vd, vuint32m8_t vs2, size_t vl); +---- + +[[policy-variant-]] +==== Vector AES-128 Forward KeySchedule generation + +[,c] +---- +vuint32mf2_t __riscv_vaeskf1_vi_u32mf2_tu (vuint32mf2_t vd, vuint32mf2_t vs2, size_t uimm, size_t vl); +vuint32m1_t __riscv_vaeskf1_vi_u32m1_tu (vuint32m1_t vd, vuint32m1_t vs2, size_t uimm, size_t vl); +vuint32m2_t __riscv_vaeskf1_vi_u32m2_tu (vuint32m2_t vd, vuint32m2_t vs2, size_t uimm, size_t vl); +vuint32m4_t __riscv_vaeskf1_vi_u32m4_tu (vuint32m4_t vd, vuint32m4_t vs2, size_t uimm, size_t vl); +vuint32m8_t __riscv_vaeskf1_vi_u32m8_tu (vuint32m8_t vd, vuint32m8_t vs2, size_t uimm, size_t vl); +vuint32mf2_t __riscv_vaeskf2_vi_u32mf2_tu (vuint32mf2_t vd, vuint32mf2_t vs2, size_t uimm, size_t vl); +vuint32m1_t __riscv_vaeskf2_vi_u32m1_tu (vuint32m1_t vd, vuint32m1_t vs2, size_t uimm, size_t vl); +vuint32m2_t __riscv_vaeskf2_vi_u32m2_tu (vuint32m2_t vd, vuint32m2_t vs2, size_t uimm, size_t vl); +vuint32m4_t __riscv_vaeskf2_vi_u32m4_tu (vuint32m4_t vd, vuint32m4_t vs2, size_t uimm, size_t vl); +vuint32m8_t __riscv_vaeskf2_vi_u32m8_tu (vuint32m8_t vd, vuint32m8_t vs2, size_t uimm, size_t vl); +---- + +[[policy-variant-]] +==== Vector AES round zero + +[,c] +---- +vuint32mf2_t __riscv_vaesz_vs_u32mf2_u32mf2_tu (vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vaesz_vs_u32mf2_u32m1_tu (vuint32m1_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m2_t __riscv_vaesz_vs_u32mf2_u32m2_tu (vuint32m2_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m4_t __riscv_vaesz_vs_u32mf2_u32m4_tu (vuint32m4_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m8_t __riscv_vaesz_vs_u32mf2_u32m8_tu (vuint32m8_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vaesz_vs_u32m1_u32m1_tu (vuint32m1_t 
vd, vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vaesz_vs_u32m1_u32m2_tu (vuint32m2_t vd, vuint32m1_t vs2, size_t vl); +vuint32m4_t __riscv_vaesz_vs_u32m1_u32m4_tu (vuint32m4_t vd, vuint32m1_t vs2, size_t vl); +vuint32m8_t __riscv_vaesz_vs_u32m1_u32m8_tu (vuint32m8_t vd, vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vaesz_vs_u32m2_u32m2_tu (vuint32m2_t vd, vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vaesz_vs_u32m2_u32m4_tu (vuint32m4_t vd, vuint32m2_t vs2, size_t vl); +vuint32m8_t __riscv_vaesz_vs_u32m2_u32m8_tu (vuint32m8_t vd, vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vaesz_vs_u32m4_u32m4_tu (vuint32m4_t vd, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vaesz_vs_u32m4_u32m8_tu (vuint32m8_t vd, vuint32m4_t vs2, size_t vl); +---- diff --git a/auto-generated/vector-crypto/policy_funcs/intrinsic_funcs/04_zvknh_-_nist_suite:_vector_sha-2_secure_hash.adoc b/auto-generated/vector-crypto/policy_funcs/intrinsic_funcs/04_zvknh_-_nist_suite:_vector_sha-2_secure_hash.adoc new file mode 100644 index 000000000..2aec4fd51 --- /dev/null +++ b/auto-generated/vector-crypto/policy_funcs/intrinsic_funcs/04_zvknh_-_nist_suite:_vector_sha-2_secure_hash.adoc @@ -0,0 +1,43 @@ + +=== Zvknh - NIST Suite: Vector SHA-2 Secure Hash + +[[policy-variant-]] +==== Vector SHA-2 message schedule + +[,c] +---- +vuint32mf2_t __riscv_vsha2ms_vv_u32mf2_tu (vuint32mf2_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl); +vuint32m1_t __riscv_vsha2ms_vv_u32m1_tu (vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl); +vuint32m2_t __riscv_vsha2ms_vv_u32m2_tu (vuint32m2_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl); +vuint32m4_t __riscv_vsha2ms_vv_u32m4_tu (vuint32m4_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl); +vuint32m8_t __riscv_vsha2ms_vv_u32m8_tu (vuint32m8_t vd, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl); +vuint64m1_t __riscv_vsha2ms_vv_u64m1_tu (vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl); +vuint64m2_t __riscv_vsha2ms_vv_u64m2_tu (vuint64m2_t vd, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl); +vuint64m4_t __riscv_vsha2ms_vv_u64m4_tu (vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl); +vuint64m8_t __riscv_vsha2ms_vv_u64m8_tu (vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl); +---- + +[[policy-variant-]] +==== Vector SHA-2 two rounds of compression + +[,c] +---- +vuint32mf2_t __riscv_vsha2ch_vv_u32mf2_tu (vuint32mf2_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl); +vuint32m1_t __riscv_vsha2ch_vv_u32m1_tu (vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl); +vuint32m2_t __riscv_vsha2ch_vv_u32m2_tu (vuint32m2_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl); +vuint32m4_t __riscv_vsha2ch_vv_u32m4_tu (vuint32m4_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl); +vuint32m8_t __riscv_vsha2ch_vv_u32m8_tu (vuint32m8_t vd, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl); +vuint64m1_t __riscv_vsha2ch_vv_u64m1_tu (vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl); +vuint64m2_t __riscv_vsha2ch_vv_u64m2_tu (vuint64m2_t vd, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl); +vuint64m4_t __riscv_vsha2ch_vv_u64m4_tu (vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl); +vuint64m8_t __riscv_vsha2ch_vv_u64m8_tu (vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl); +vuint32mf2_t __riscv_vsha2cl_vv_u32mf2_tu (vuint32mf2_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl); +vuint32m1_t __riscv_vsha2cl_vv_u32m1_tu (vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl); +vuint32m2_t __riscv_vsha2cl_vv_u32m2_tu 
(vuint32m2_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl); +vuint32m4_t __riscv_vsha2cl_vv_u32m4_tu (vuint32m4_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl); +vuint32m8_t __riscv_vsha2cl_vv_u32m8_tu (vuint32m8_t vd, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl); +vuint64m1_t __riscv_vsha2cl_vv_u64m1_tu (vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl); +vuint64m2_t __riscv_vsha2cl_vv_u64m2_tu (vuint64m2_t vd, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl); +vuint64m4_t __riscv_vsha2cl_vv_u64m4_tu (vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl); +vuint64m8_t __riscv_vsha2cl_vv_u64m8_tu (vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl); +---- diff --git a/auto-generated/vector-crypto/policy_funcs/intrinsic_funcs/05_zvksed_-_shangmi_suite:_sm4_block_cipher.adoc b/auto-generated/vector-crypto/policy_funcs/intrinsic_funcs/05_zvksed_-_shangmi_suite:_sm4_block_cipher.adoc new file mode 100644 index 000000000..95d0f470f --- /dev/null +++ b/auto-generated/vector-crypto/policy_funcs/intrinsic_funcs/05_zvksed_-_shangmi_suite:_sm4_block_cipher.adoc @@ -0,0 +1,40 @@ + +=== Zvksed - ShangMi Suite: SM4 Block Cipher + +[[policy-variant-]] +==== Vector SM4 KeyExpansion + +[,c] +---- +vuint32mf2_t __riscv_vsm4k_vi_u32mf2_tu (vuint32mf2_t vd, vuint32mf2_t vs2, size_t uimm, size_t vl); +vuint32m1_t __riscv_vsm4k_vi_u32m1_tu (vuint32m1_t vd, vuint32m1_t vs2, size_t uimm, size_t vl); +vuint32m2_t __riscv_vsm4k_vi_u32m2_tu (vuint32m2_t vd, vuint32m2_t vs2, size_t uimm, size_t vl); +vuint32m4_t __riscv_vsm4k_vi_u32m4_tu (vuint32m4_t vd, vuint32m4_t vs2, size_t uimm, size_t vl); +vuint32m8_t __riscv_vsm4k_vi_u32m8_tu (vuint32m8_t vd, vuint32m8_t vs2, size_t uimm, size_t vl); +---- + +[[policy-variant-]] +==== Vector SM4 Rounds + +[,c] +---- +vuint32mf2_t __riscv_vsm4r_vv_u32mf2_tu (vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); +vuint32mf2_t __riscv_vsm4r_vs_u32mf2_u32mf2_tu (vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vsm4r_vs_u32mf2_u32m1_tu (vuint32m1_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m2_t __riscv_vsm4r_vs_u32mf2_u32m2_tu (vuint32m2_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m4_t __riscv_vsm4r_vs_u32mf2_u32m4_tu (vuint32m4_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m8_t __riscv_vsm4r_vs_u32mf2_u32m8_tu (vuint32m8_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vsm4r_vv_u32m1_tu (vuint32m1_t vd, vuint32m1_t vs2, size_t vl); +vuint32m1_t __riscv_vsm4r_vs_u32m1_u32m1_tu (vuint32m1_t vd, vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vsm4r_vs_u32m1_u32m2_tu (vuint32m2_t vd, vuint32m1_t vs2, size_t vl); +vuint32m4_t __riscv_vsm4r_vs_u32m1_u32m4_tu (vuint32m4_t vd, vuint32m1_t vs2, size_t vl); +vuint32m8_t __riscv_vsm4r_vs_u32m1_u32m8_tu (vuint32m8_t vd, vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vsm4r_vv_u32m2_tu (vuint32m2_t vd, vuint32m2_t vs2, size_t vl); +vuint32m2_t __riscv_vsm4r_vs_u32m2_u32m2_tu (vuint32m2_t vd, vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vsm4r_vs_u32m2_u32m4_tu (vuint32m4_t vd, vuint32m2_t vs2, size_t vl); +vuint32m8_t __riscv_vsm4r_vs_u32m2_u32m8_tu (vuint32m8_t vd, vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vsm4r_vv_u32m4_tu (vuint32m4_t vd, vuint32m4_t vs2, size_t vl); +vuint32m4_t __riscv_vsm4r_vs_u32m4_u32m4_tu (vuint32m4_t vd, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vsm4r_vs_u32m4_u32m8_tu (vuint32m8_t vd, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vsm4r_vv_u32m8_tu (vuint32m8_t vd, vuint32m8_t vs2, size_t vl); +---- diff --git 
a/auto-generated/vector-crypto/policy_funcs/intrinsic_funcs/06_zvksh_-_shangmi_suite:_sm3_secure_hash.adoc b/auto-generated/vector-crypto/policy_funcs/intrinsic_funcs/06_zvksh_-_shangmi_suite:_sm3_secure_hash.adoc new file mode 100644 index 000000000..589216717 --- /dev/null +++ b/auto-generated/vector-crypto/policy_funcs/intrinsic_funcs/06_zvksh_-_shangmi_suite:_sm3_secure_hash.adoc @@ -0,0 +1,26 @@ + +=== Zvksh - ShangMi Suite: SM3 Secure Hash + +[[policy-variant-]] +==== Vector SM3 Message Expansion + +[,c] +---- +vuint32mf2_t __riscv_vsm3me_vv_u32mf2_tu (vuint32mf2_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl); +vuint32m1_t __riscv_vsm3me_vv_u32m1_tu (vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl); +vuint32m2_t __riscv_vsm3me_vv_u32m2_tu (vuint32m2_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl); +vuint32m4_t __riscv_vsm3me_vv_u32m4_tu (vuint32m4_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl); +vuint32m8_t __riscv_vsm3me_vv_u32m8_tu (vuint32m8_t vd, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl); +---- + +[[policy-variant-]] +==== Vector SM3 Compression + +[,c] +---- +vuint32mf2_t __riscv_vsm3c_vi_u32mf2_tu (vuint32mf2_t vd, vuint32mf2_t vs2, size_t uimm, size_t vl); +vuint32m1_t __riscv_vsm3c_vi_u32m1_tu (vuint32m1_t vd, vuint32m1_t vs2, size_t uimm, size_t vl); +vuint32m2_t __riscv_vsm3c_vi_u32m2_tu (vuint32m2_t vd, vuint32m2_t vs2, size_t uimm, size_t vl); +vuint32m4_t __riscv_vsm3c_vi_u32m4_tu (vuint32m4_t vd, vuint32m4_t vs2, size_t uimm, size_t vl); +vuint32m8_t __riscv_vsm3c_vi_u32m8_tu (vuint32m8_t vd, vuint32m8_t vs2, size_t uimm, size_t vl); +---- diff --git a/auto-generated/vector-crypto/policy_funcs/llvm-api-tests/vaesdf.c b/auto-generated/vector-crypto/policy_funcs/llvm-api-tests/vaesdf.c new file mode 100644 index 000000000..990433721 --- /dev/null +++ b/auto-generated/vector-crypto/policy_funcs/llvm-api-tests/vaesdf.c @@ -0,0 +1,89 @@ +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zvl512b \ +// RUN: -target-feature +zvbb \ +// RUN: -target-feature +zvbc \ +// RUN: -target-feature +zvkg \ +// RUN: -target-feature +zvkned \ +// RUN: -target-feature +zvknhb \ +// RUN: -target-feature +zvksed \ +// RUN: -target-feature +zvksh -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include + +vuint32mf2_t test_vaesdf_vv_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) { + return __riscv_vaesdf_vv_u32mf2_tu(vd, vs2, vl); +} + +vuint32mf2_t test_vaesdf_vs_u32mf2_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) { + return __riscv_vaesdf_vs_u32mf2_u32mf2_tu(vd, vs2, vl); +} + +vuint32m1_t test_vaesdf_vs_u32mf2_u32m1_tu(vuint32m1_t vd, vuint32mf2_t vs2, size_t vl) { + return __riscv_vaesdf_vs_u32mf2_u32m1_tu(vd, vs2, vl); +} + +vuint32m2_t test_vaesdf_vs_u32mf2_u32m2_tu(vuint32m2_t vd, vuint32mf2_t vs2, size_t vl) { + return __riscv_vaesdf_vs_u32mf2_u32m2_tu(vd, vs2, vl); +} + +vuint32m4_t test_vaesdf_vs_u32mf2_u32m4_tu(vuint32m4_t vd, vuint32mf2_t vs2, size_t vl) { + return __riscv_vaesdf_vs_u32mf2_u32m4_tu(vd, vs2, vl); +} + +vuint32m8_t test_vaesdf_vs_u32mf2_u32m8_tu(vuint32m8_t vd, vuint32mf2_t vs2, size_t vl) { + return __riscv_vaesdf_vs_u32mf2_u32m8_tu(vd, vs2, vl); +} + +vuint32m1_t test_vaesdf_vv_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vaesdf_vv_u32m1_tu(vd, vs2, vl); +} + +vuint32m1_t test_vaesdf_vs_u32m1_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { + 
return __riscv_vaesdf_vs_u32m1_u32m1_tu(vd, vs2, vl); +} + +vuint32m2_t test_vaesdf_vs_u32m1_u32m2_tu(vuint32m2_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vaesdf_vs_u32m1_u32m2_tu(vd, vs2, vl); +} + +vuint32m4_t test_vaesdf_vs_u32m1_u32m4_tu(vuint32m4_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vaesdf_vs_u32m1_u32m4_tu(vd, vs2, vl); +} + +vuint32m8_t test_vaesdf_vs_u32m1_u32m8_tu(vuint32m8_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vaesdf_vs_u32m1_u32m8_tu(vd, vs2, vl); +} + +vuint32m2_t test_vaesdf_vv_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vaesdf_vv_u32m2_tu(vd, vs2, vl); +} + +vuint32m2_t test_vaesdf_vs_u32m2_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vaesdf_vs_u32m2_u32m2_tu(vd, vs2, vl); +} + +vuint32m4_t test_vaesdf_vs_u32m2_u32m4_tu(vuint32m4_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vaesdf_vs_u32m2_u32m4_tu(vd, vs2, vl); +} + +vuint32m8_t test_vaesdf_vs_u32m2_u32m8_tu(vuint32m8_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vaesdf_vs_u32m2_u32m8_tu(vd, vs2, vl); +} + +vuint32m4_t test_vaesdf_vv_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vaesdf_vv_u32m4_tu(vd, vs2, vl); +} + +vuint32m4_t test_vaesdf_vs_u32m4_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vaesdf_vs_u32m4_u32m4_tu(vd, vs2, vl); +} + +vuint32m8_t test_vaesdf_vs_u32m4_u32m8_tu(vuint32m8_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vaesdf_vs_u32m4_u32m8_tu(vd, vs2, vl); +} + +vuint32m8_t test_vaesdf_vv_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { + return __riscv_vaesdf_vv_u32m8_tu(vd, vs2, vl); +} diff --git a/auto-generated/vector-crypto/policy_funcs/llvm-api-tests/vaesdm.c b/auto-generated/vector-crypto/policy_funcs/llvm-api-tests/vaesdm.c new file mode 100644 index 000000000..80a243721 --- /dev/null +++ b/auto-generated/vector-crypto/policy_funcs/llvm-api-tests/vaesdm.c @@ -0,0 +1,89 @@ +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zvl512b \ +// RUN: -target-feature +zvbb \ +// RUN: -target-feature +zvbc \ +// RUN: -target-feature +zvkg \ +// RUN: -target-feature +zvkned \ +// RUN: -target-feature +zvknhb \ +// RUN: -target-feature +zvksed \ +// RUN: -target-feature +zvksh -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include + +vuint32mf2_t test_vaesdm_vv_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) { + return __riscv_vaesdm_vv_u32mf2_tu(vd, vs2, vl); +} + +vuint32mf2_t test_vaesdm_vs_u32mf2_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) { + return __riscv_vaesdm_vs_u32mf2_u32mf2_tu(vd, vs2, vl); +} + +vuint32m1_t test_vaesdm_vs_u32mf2_u32m1_tu(vuint32m1_t vd, vuint32mf2_t vs2, size_t vl) { + return __riscv_vaesdm_vs_u32mf2_u32m1_tu(vd, vs2, vl); +} + +vuint32m2_t test_vaesdm_vs_u32mf2_u32m2_tu(vuint32m2_t vd, vuint32mf2_t vs2, size_t vl) { + return __riscv_vaesdm_vs_u32mf2_u32m2_tu(vd, vs2, vl); +} + +vuint32m4_t test_vaesdm_vs_u32mf2_u32m4_tu(vuint32m4_t vd, vuint32mf2_t vs2, size_t vl) { + return __riscv_vaesdm_vs_u32mf2_u32m4_tu(vd, vs2, vl); +} + +vuint32m8_t test_vaesdm_vs_u32mf2_u32m8_tu(vuint32m8_t vd, vuint32mf2_t vs2, size_t vl) { + return __riscv_vaesdm_vs_u32mf2_u32m8_tu(vd, vs2, vl); +} + +vuint32m1_t test_vaesdm_vv_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vaesdm_vv_u32m1_tu(vd, vs2, vl); +} + +vuint32m1_t 
test_vaesdm_vs_u32m1_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vaesdm_vs_u32m1_u32m1_tu(vd, vs2, vl); +} + +vuint32m2_t test_vaesdm_vs_u32m1_u32m2_tu(vuint32m2_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vaesdm_vs_u32m1_u32m2_tu(vd, vs2, vl); +} + +vuint32m4_t test_vaesdm_vs_u32m1_u32m4_tu(vuint32m4_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vaesdm_vs_u32m1_u32m4_tu(vd, vs2, vl); +} + +vuint32m8_t test_vaesdm_vs_u32m1_u32m8_tu(vuint32m8_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vaesdm_vs_u32m1_u32m8_tu(vd, vs2, vl); +} + +vuint32m2_t test_vaesdm_vv_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vaesdm_vv_u32m2_tu(vd, vs2, vl); +} + +vuint32m2_t test_vaesdm_vs_u32m2_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vaesdm_vs_u32m2_u32m2_tu(vd, vs2, vl); +} + +vuint32m4_t test_vaesdm_vs_u32m2_u32m4_tu(vuint32m4_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vaesdm_vs_u32m2_u32m4_tu(vd, vs2, vl); +} + +vuint32m8_t test_vaesdm_vs_u32m2_u32m8_tu(vuint32m8_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vaesdm_vs_u32m2_u32m8_tu(vd, vs2, vl); +} + +vuint32m4_t test_vaesdm_vv_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vaesdm_vv_u32m4_tu(vd, vs2, vl); +} + +vuint32m4_t test_vaesdm_vs_u32m4_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vaesdm_vs_u32m4_u32m4_tu(vd, vs2, vl); +} + +vuint32m8_t test_vaesdm_vs_u32m4_u32m8_tu(vuint32m8_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vaesdm_vs_u32m4_u32m8_tu(vd, vs2, vl); +} + +vuint32m8_t test_vaesdm_vv_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { + return __riscv_vaesdm_vv_u32m8_tu(vd, vs2, vl); +} diff --git a/auto-generated/vector-crypto/policy_funcs/llvm-api-tests/vaesef.c b/auto-generated/vector-crypto/policy_funcs/llvm-api-tests/vaesef.c new file mode 100644 index 000000000..224ac4953 --- /dev/null +++ b/auto-generated/vector-crypto/policy_funcs/llvm-api-tests/vaesef.c @@ -0,0 +1,89 @@ +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zvl512b \ +// RUN: -target-feature +zvbb \ +// RUN: -target-feature +zvbc \ +// RUN: -target-feature +zvkg \ +// RUN: -target-feature +zvkned \ +// RUN: -target-feature +zvknhb \ +// RUN: -target-feature +zvksed \ +// RUN: -target-feature +zvksh -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include + +vuint32mf2_t test_vaesef_vv_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) { + return __riscv_vaesef_vv_u32mf2_tu(vd, vs2, vl); +} + +vuint32mf2_t test_vaesef_vs_u32mf2_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) { + return __riscv_vaesef_vs_u32mf2_u32mf2_tu(vd, vs2, vl); +} + +vuint32m1_t test_vaesef_vs_u32mf2_u32m1_tu(vuint32m1_t vd, vuint32mf2_t vs2, size_t vl) { + return __riscv_vaesef_vs_u32mf2_u32m1_tu(vd, vs2, vl); +} + +vuint32m2_t test_vaesef_vs_u32mf2_u32m2_tu(vuint32m2_t vd, vuint32mf2_t vs2, size_t vl) { + return __riscv_vaesef_vs_u32mf2_u32m2_tu(vd, vs2, vl); +} + +vuint32m4_t test_vaesef_vs_u32mf2_u32m4_tu(vuint32m4_t vd, vuint32mf2_t vs2, size_t vl) { + return __riscv_vaesef_vs_u32mf2_u32m4_tu(vd, vs2, vl); +} + +vuint32m8_t test_vaesef_vs_u32mf2_u32m8_tu(vuint32m8_t vd, vuint32mf2_t vs2, size_t vl) { + return __riscv_vaesef_vs_u32mf2_u32m8_tu(vd, vs2, vl); +} + +vuint32m1_t test_vaesef_vv_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { + return 
__riscv_vaesef_vv_u32m1_tu(vd, vs2, vl); +} + +vuint32m1_t test_vaesef_vs_u32m1_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vaesef_vs_u32m1_u32m1_tu(vd, vs2, vl); +} + +vuint32m2_t test_vaesef_vs_u32m1_u32m2_tu(vuint32m2_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vaesef_vs_u32m1_u32m2_tu(vd, vs2, vl); +} + +vuint32m4_t test_vaesef_vs_u32m1_u32m4_tu(vuint32m4_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vaesef_vs_u32m1_u32m4_tu(vd, vs2, vl); +} + +vuint32m8_t test_vaesef_vs_u32m1_u32m8_tu(vuint32m8_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vaesef_vs_u32m1_u32m8_tu(vd, vs2, vl); +} + +vuint32m2_t test_vaesef_vv_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vaesef_vv_u32m2_tu(vd, vs2, vl); +} + +vuint32m2_t test_vaesef_vs_u32m2_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vaesef_vs_u32m2_u32m2_tu(vd, vs2, vl); +} + +vuint32m4_t test_vaesef_vs_u32m2_u32m4_tu(vuint32m4_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vaesef_vs_u32m2_u32m4_tu(vd, vs2, vl); +} + +vuint32m8_t test_vaesef_vs_u32m2_u32m8_tu(vuint32m8_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vaesef_vs_u32m2_u32m8_tu(vd, vs2, vl); +} + +vuint32m4_t test_vaesef_vv_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vaesef_vv_u32m4_tu(vd, vs2, vl); +} + +vuint32m4_t test_vaesef_vs_u32m4_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vaesef_vs_u32m4_u32m4_tu(vd, vs2, vl); +} + +vuint32m8_t test_vaesef_vs_u32m4_u32m8_tu(vuint32m8_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vaesef_vs_u32m4_u32m8_tu(vd, vs2, vl); +} + +vuint32m8_t test_vaesef_vv_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { + return __riscv_vaesef_vv_u32m8_tu(vd, vs2, vl); +} diff --git a/auto-generated/vector-crypto/policy_funcs/llvm-api-tests/vaesem.c b/auto-generated/vector-crypto/policy_funcs/llvm-api-tests/vaesem.c new file mode 100644 index 000000000..fa0a10105 --- /dev/null +++ b/auto-generated/vector-crypto/policy_funcs/llvm-api-tests/vaesem.c @@ -0,0 +1,89 @@ +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zvl512b \ +// RUN: -target-feature +zvbb \ +// RUN: -target-feature +zvbc \ +// RUN: -target-feature +zvkg \ +// RUN: -target-feature +zvkned \ +// RUN: -target-feature +zvknhb \ +// RUN: -target-feature +zvksed \ +// RUN: -target-feature +zvksh -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include <riscv_vector.h> + +vuint32mf2_t test_vaesem_vv_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) { + return __riscv_vaesem_vv_u32mf2_tu(vd, vs2, vl); +} + +vuint32mf2_t test_vaesem_vs_u32mf2_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) { + return __riscv_vaesem_vs_u32mf2_u32mf2_tu(vd, vs2, vl); +} + +vuint32m1_t test_vaesem_vs_u32mf2_u32m1_tu(vuint32m1_t vd, vuint32mf2_t vs2, size_t vl) { + return __riscv_vaesem_vs_u32mf2_u32m1_tu(vd, vs2, vl); +} + +vuint32m2_t test_vaesem_vs_u32mf2_u32m2_tu(vuint32m2_t vd, vuint32mf2_t vs2, size_t vl) { + return __riscv_vaesem_vs_u32mf2_u32m2_tu(vd, vs2, vl); +} + +vuint32m4_t test_vaesem_vs_u32mf2_u32m4_tu(vuint32m4_t vd, vuint32mf2_t vs2, size_t vl) { + return __riscv_vaesem_vs_u32mf2_u32m4_tu(vd, vs2, vl); +} + +vuint32m8_t test_vaesem_vs_u32mf2_u32m8_tu(vuint32m8_t vd, vuint32mf2_t vs2, size_t vl) { + return __riscv_vaesem_vs_u32mf2_u32m8_tu(vd, vs2, vl); +} + +vuint32m1_t 
test_vaesem_vv_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vaesem_vv_u32m1_tu(vd, vs2, vl); +} + +vuint32m1_t test_vaesem_vs_u32m1_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vaesem_vs_u32m1_u32m1_tu(vd, vs2, vl); +} + +vuint32m2_t test_vaesem_vs_u32m1_u32m2_tu(vuint32m2_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vaesem_vs_u32m1_u32m2_tu(vd, vs2, vl); +} + +vuint32m4_t test_vaesem_vs_u32m1_u32m4_tu(vuint32m4_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vaesem_vs_u32m1_u32m4_tu(vd, vs2, vl); +} + +vuint32m8_t test_vaesem_vs_u32m1_u32m8_tu(vuint32m8_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vaesem_vs_u32m1_u32m8_tu(vd, vs2, vl); +} + +vuint32m2_t test_vaesem_vv_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vaesem_vv_u32m2_tu(vd, vs2, vl); +} + +vuint32m2_t test_vaesem_vs_u32m2_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vaesem_vs_u32m2_u32m2_tu(vd, vs2, vl); +} + +vuint32m4_t test_vaesem_vs_u32m2_u32m4_tu(vuint32m4_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vaesem_vs_u32m2_u32m4_tu(vd, vs2, vl); +} + +vuint32m8_t test_vaesem_vs_u32m2_u32m8_tu(vuint32m8_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vaesem_vs_u32m2_u32m8_tu(vd, vs2, vl); +} + +vuint32m4_t test_vaesem_vv_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vaesem_vv_u32m4_tu(vd, vs2, vl); +} + +vuint32m4_t test_vaesem_vs_u32m4_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vaesem_vs_u32m4_u32m4_tu(vd, vs2, vl); +} + +vuint32m8_t test_vaesem_vs_u32m4_u32m8_tu(vuint32m8_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vaesem_vs_u32m4_u32m8_tu(vd, vs2, vl); +} + +vuint32m8_t test_vaesem_vv_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { + return __riscv_vaesem_vv_u32m8_tu(vd, vs2, vl); +} diff --git a/auto-generated/vector-crypto/policy_funcs/llvm-api-tests/vaeskf1.c b/auto-generated/vector-crypto/policy_funcs/llvm-api-tests/vaeskf1.c new file mode 100644 index 000000000..cc4667e80 --- /dev/null +++ b/auto-generated/vector-crypto/policy_funcs/llvm-api-tests/vaeskf1.c @@ -0,0 +1,33 @@ +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zvl512b \ +// RUN: -target-feature +zvbb \ +// RUN: -target-feature +zvbc \ +// RUN: -target-feature +zvkg \ +// RUN: -target-feature +zvkned \ +// RUN: -target-feature +zvknhb \ +// RUN: -target-feature +zvksed \ +// RUN: -target-feature +zvksh -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include <riscv_vector.h> + +vuint32mf2_t test_vaeskf1_vi_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) { + return __riscv_vaeskf1_vi_u32mf2_tu(vd, vs2, 0, vl); +} + +vuint32m1_t test_vaeskf1_vi_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vaeskf1_vi_u32m1_tu(vd, vs2, 0, vl); +} + +vuint32m2_t test_vaeskf1_vi_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vaeskf1_vi_u32m2_tu(vd, vs2, 0, vl); +} + +vuint32m4_t test_vaeskf1_vi_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vaeskf1_vi_u32m4_tu(vd, vs2, 0, vl); +} + +vuint32m8_t test_vaeskf1_vi_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { + return __riscv_vaeskf1_vi_u32m8_tu(vd, vs2, 0, vl); +} diff --git a/auto-generated/vector-crypto/policy_funcs/llvm-api-tests/vaeskf2.c b/auto-generated/vector-crypto/policy_funcs/llvm-api-tests/vaeskf2.c 
new file mode 100644 index 000000000..7f05b473c --- /dev/null +++ b/auto-generated/vector-crypto/policy_funcs/llvm-api-tests/vaeskf2.c @@ -0,0 +1,33 @@ +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zvl512b \ +// RUN: -target-feature +zvbb \ +// RUN: -target-feature +zvbc \ +// RUN: -target-feature +zvkg \ +// RUN: -target-feature +zvkned \ +// RUN: -target-feature +zvknhb \ +// RUN: -target-feature +zvksed \ +// RUN: -target-feature +zvksh -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include <riscv_vector.h> + +vuint32mf2_t test_vaeskf2_vi_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) { + return __riscv_vaeskf2_vi_u32mf2_tu(vd, vs2, 0, vl); +} + +vuint32m1_t test_vaeskf2_vi_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vaeskf2_vi_u32m1_tu(vd, vs2, 0, vl); +} + +vuint32m2_t test_vaeskf2_vi_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vaeskf2_vi_u32m2_tu(vd, vs2, 0, vl); +} + +vuint32m4_t test_vaeskf2_vi_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vaeskf2_vi_u32m4_tu(vd, vs2, 0, vl); +} + +vuint32m8_t test_vaeskf2_vi_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { + return __riscv_vaeskf2_vi_u32m8_tu(vd, vs2, 0, vl); +} diff --git a/auto-generated/vector-crypto/policy_funcs/llvm-api-tests/vaesz.c b/auto-generated/vector-crypto/policy_funcs/llvm-api-tests/vaesz.c new file mode 100644 index 000000000..f50cae600 --- /dev/null +++ b/auto-generated/vector-crypto/policy_funcs/llvm-api-tests/vaesz.c @@ -0,0 +1,69 @@ +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zvl512b \ +// RUN: -target-feature +zvbb \ +// RUN: -target-feature +zvbc \ +// RUN: -target-feature +zvkg \ +// RUN: -target-feature +zvkned \ +// RUN: -target-feature +zvknhb \ +// RUN: -target-feature +zvksed \ +// RUN: -target-feature +zvksh -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include <riscv_vector.h> + +vuint32mf2_t test_vaesz_vs_u32mf2_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) { + return __riscv_vaesz_vs_u32mf2_u32mf2_tu(vd, vs2, vl); +} + +vuint32m1_t test_vaesz_vs_u32mf2_u32m1_tu(vuint32m1_t vd, vuint32mf2_t vs2, size_t vl) { + return __riscv_vaesz_vs_u32mf2_u32m1_tu(vd, vs2, vl); +} + +vuint32m2_t test_vaesz_vs_u32mf2_u32m2_tu(vuint32m2_t vd, vuint32mf2_t vs2, size_t vl) { + return __riscv_vaesz_vs_u32mf2_u32m2_tu(vd, vs2, vl); +} + +vuint32m4_t test_vaesz_vs_u32mf2_u32m4_tu(vuint32m4_t vd, vuint32mf2_t vs2, size_t vl) { + return __riscv_vaesz_vs_u32mf2_u32m4_tu(vd, vs2, vl); +} + +vuint32m8_t test_vaesz_vs_u32mf2_u32m8_tu(vuint32m8_t vd, vuint32mf2_t vs2, size_t vl) { + return __riscv_vaesz_vs_u32mf2_u32m8_tu(vd, vs2, vl); +} + +vuint32m1_t test_vaesz_vs_u32m1_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vaesz_vs_u32m1_u32m1_tu(vd, vs2, vl); +} + +vuint32m2_t test_vaesz_vs_u32m1_u32m2_tu(vuint32m2_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vaesz_vs_u32m1_u32m2_tu(vd, vs2, vl); +} + +vuint32m4_t test_vaesz_vs_u32m1_u32m4_tu(vuint32m4_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vaesz_vs_u32m1_u32m4_tu(vd, vs2, vl); +} + +vuint32m8_t test_vaesz_vs_u32m1_u32m8_tu(vuint32m8_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vaesz_vs_u32m1_u32m8_tu(vd, vs2, vl); +} + +vuint32m2_t 
test_vaesz_vs_u32m2_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vaesz_vs_u32m2_u32m2_tu(vd, vs2, vl); +} + +vuint32m4_t test_vaesz_vs_u32m2_u32m4_tu(vuint32m4_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vaesz_vs_u32m2_u32m4_tu(vd, vs2, vl); +} + +vuint32m8_t test_vaesz_vs_u32m2_u32m8_tu(vuint32m8_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vaesz_vs_u32m2_u32m8_tu(vd, vs2, vl); +} + +vuint32m4_t test_vaesz_vs_u32m4_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vaesz_vs_u32m4_u32m4_tu(vd, vs2, vl); +} + +vuint32m8_t test_vaesz_vs_u32m4_u32m8_tu(vuint32m8_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vaesz_vs_u32m4_u32m8_tu(vd, vs2, vl); +} diff --git a/auto-generated/vector-crypto/policy_funcs/llvm-api-tests/vandn.c b/auto-generated/vector-crypto/policy_funcs/llvm-api-tests/vandn.c new file mode 100644 index 000000000..8e79acfdd --- /dev/null +++ b/auto-generated/vector-crypto/policy_funcs/llvm-api-tests/vandn.c @@ -0,0 +1,717 @@ +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zvl512b \ +// RUN: -target-feature +zvbb \ +// RUN: -target-feature +zvbc \ +// RUN: -target-feature +zvkg \ +// RUN: -target-feature +zvkned \ +// RUN: -target-feature +zvknhb \ +// RUN: -target-feature +zvksed \ +// RUN: -target-feature +zvksh -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include <riscv_vector.h> + +vuint8mf8_t test_vandn_vv_u8mf8_tu(vuint8mf8_t vd, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { + return __riscv_vandn_vv_u8mf8_tu(vd, vs2, vs1, vl); +} + +vuint8mf8_t test_vandn_vx_u8mf8_tu(vuint8mf8_t vd, vuint8mf8_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vandn_vx_u8mf8_tu(vd, vs2, rs1, vl); +} + +vuint8mf4_t test_vandn_vv_u8mf4_tu(vuint8mf4_t vd, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { + return __riscv_vandn_vv_u8mf4_tu(vd, vs2, vs1, vl); +} + +vuint8mf4_t test_vandn_vx_u8mf4_tu(vuint8mf4_t vd, vuint8mf4_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vandn_vx_u8mf4_tu(vd, vs2, rs1, vl); +} + +vuint8mf2_t test_vandn_vv_u8mf2_tu(vuint8mf2_t vd, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { + return __riscv_vandn_vv_u8mf2_tu(vd, vs2, vs1, vl); +} + +vuint8mf2_t test_vandn_vx_u8mf2_tu(vuint8mf2_t vd, vuint8mf2_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vandn_vx_u8mf2_tu(vd, vs2, rs1, vl); +} + +vuint8m1_t test_vandn_vv_u8m1_tu(vuint8m1_t vd, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vandn_vv_u8m1_tu(vd, vs2, vs1, vl); +} + +vuint8m1_t test_vandn_vx_u8m1_tu(vuint8m1_t vd, vuint8m1_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vandn_vx_u8m1_tu(vd, vs2, rs1, vl); +} + +vuint8m2_t test_vandn_vv_u8m2_tu(vuint8m2_t vd, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) { + return __riscv_vandn_vv_u8m2_tu(vd, vs2, vs1, vl); +} + +vuint8m2_t test_vandn_vx_u8m2_tu(vuint8m2_t vd, vuint8m2_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vandn_vx_u8m2_tu(vd, vs2, rs1, vl); +} + +vuint8m4_t test_vandn_vv_u8m4_tu(vuint8m4_t vd, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) { + return __riscv_vandn_vv_u8m4_tu(vd, vs2, vs1, vl); +} + +vuint8m4_t test_vandn_vx_u8m4_tu(vuint8m4_t vd, vuint8m4_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vandn_vx_u8m4_tu(vd, vs2, rs1, vl); +} + +vuint8m8_t test_vandn_vv_u8m8_tu(vuint8m8_t vd, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) { + return __riscv_vandn_vv_u8m8_tu(vd, vs2, vs1, vl); +} + +vuint8m8_t test_vandn_vx_u8m8_tu(vuint8m8_t 
vd, vuint8m8_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vandn_vx_u8m8_tu(vd, vs2, rs1, vl); +} + +vuint16mf4_t test_vandn_vv_u16mf4_tu(vuint16mf4_t vd, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { + return __riscv_vandn_vv_u16mf4_tu(vd, vs2, vs1, vl); +} + +vuint16mf4_t test_vandn_vx_u16mf4_tu(vuint16mf4_t vd, vuint16mf4_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vandn_vx_u16mf4_tu(vd, vs2, rs1, vl); +} + +vuint16mf2_t test_vandn_vv_u16mf2_tu(vuint16mf2_t vd, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { + return __riscv_vandn_vv_u16mf2_tu(vd, vs2, vs1, vl); +} + +vuint16mf2_t test_vandn_vx_u16mf2_tu(vuint16mf2_t vd, vuint16mf2_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vandn_vx_u16mf2_tu(vd, vs2, rs1, vl); +} + +vuint16m1_t test_vandn_vv_u16m1_tu(vuint16m1_t vd, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vandn_vv_u16m1_tu(vd, vs2, vs1, vl); +} + +vuint16m1_t test_vandn_vx_u16m1_tu(vuint16m1_t vd, vuint16m1_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vandn_vx_u16m1_tu(vd, vs2, rs1, vl); +} + +vuint16m2_t test_vandn_vv_u16m2_tu(vuint16m2_t vd, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { + return __riscv_vandn_vv_u16m2_tu(vd, vs2, vs1, vl); +} + +vuint16m2_t test_vandn_vx_u16m2_tu(vuint16m2_t vd, vuint16m2_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vandn_vx_u16m2_tu(vd, vs2, rs1, vl); +} + +vuint16m4_t test_vandn_vv_u16m4_tu(vuint16m4_t vd, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { + return __riscv_vandn_vv_u16m4_tu(vd, vs2, vs1, vl); +} + +vuint16m4_t test_vandn_vx_u16m4_tu(vuint16m4_t vd, vuint16m4_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vandn_vx_u16m4_tu(vd, vs2, rs1, vl); +} + +vuint16m8_t test_vandn_vv_u16m8_tu(vuint16m8_t vd, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) { + return __riscv_vandn_vv_u16m8_tu(vd, vs2, vs1, vl); +} + +vuint16m8_t test_vandn_vx_u16m8_tu(vuint16m8_t vd, vuint16m8_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vandn_vx_u16m8_tu(vd, vs2, rs1, vl); +} + +vuint32mf2_t test_vandn_vv_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { + return __riscv_vandn_vv_u32mf2_tu(vd, vs2, vs1, vl); +} + +vuint32mf2_t test_vandn_vx_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vandn_vx_u32mf2_tu(vd, vs2, rs1, vl); +} + +vuint32m1_t test_vandn_vv_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vandn_vv_u32m1_tu(vd, vs2, vs1, vl); +} + +vuint32m1_t test_vandn_vx_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vandn_vx_u32m1_tu(vd, vs2, rs1, vl); +} + +vuint32m2_t test_vandn_vv_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { + return __riscv_vandn_vv_u32m2_tu(vd, vs2, vs1, vl); +} + +vuint32m2_t test_vandn_vx_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vandn_vx_u32m2_tu(vd, vs2, rs1, vl); +} + +vuint32m4_t test_vandn_vv_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { + return __riscv_vandn_vv_u32m4_tu(vd, vs2, vs1, vl); +} + +vuint32m4_t test_vandn_vx_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vandn_vx_u32m4_tu(vd, vs2, rs1, vl); +} + +vuint32m8_t test_vandn_vv_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) { + return __riscv_vandn_vv_u32m8_tu(vd, vs2, vs1, vl); +} + +vuint32m8_t test_vandn_vx_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, uint32_t rs1, size_t vl) { + return 
__riscv_vandn_vx_u32m8_tu(vd, vs2, rs1, vl); +} + +vuint64m1_t test_vandn_vv_u64m1_tu(vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vandn_vv_u64m1_tu(vd, vs2, vs1, vl); +} + +vuint64m1_t test_vandn_vx_u64m1_tu(vuint64m1_t vd, vuint64m1_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vandn_vx_u64m1_tu(vd, vs2, rs1, vl); +} + +vuint64m2_t test_vandn_vv_u64m2_tu(vuint64m2_t vd, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { + return __riscv_vandn_vv_u64m2_tu(vd, vs2, vs1, vl); +} + +vuint64m2_t test_vandn_vx_u64m2_tu(vuint64m2_t vd, vuint64m2_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vandn_vx_u64m2_tu(vd, vs2, rs1, vl); +} + +vuint64m4_t test_vandn_vv_u64m4_tu(vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { + return __riscv_vandn_vv_u64m4_tu(vd, vs2, vs1, vl); +} + +vuint64m4_t test_vandn_vx_u64m4_tu(vuint64m4_t vd, vuint64m4_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vandn_vx_u64m4_tu(vd, vs2, rs1, vl); +} + +vuint64m8_t test_vandn_vv_u64m8_tu(vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { + return __riscv_vandn_vv_u64m8_tu(vd, vs2, vs1, vl); +} + +vuint64m8_t test_vandn_vx_u64m8_tu(vuint64m8_t vd, vuint64m8_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vandn_vx_u64m8_tu(vd, vs2, rs1, vl); +} + +vuint8mf8_t test_vandn_vv_u8mf8_tum(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { + return __riscv_vandn_vv_u8mf8_tum(vm, vd, vs2, vs1, vl); +} + +vuint8mf8_t test_vandn_vx_u8mf8_tum(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vandn_vx_u8mf8_tum(vm, vd, vs2, rs1, vl); +} + +vuint8mf4_t test_vandn_vv_u8mf4_tum(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { + return __riscv_vandn_vv_u8mf4_tum(vm, vd, vs2, vs1, vl); +} + +vuint8mf4_t test_vandn_vx_u8mf4_tum(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vandn_vx_u8mf4_tum(vm, vd, vs2, rs1, vl); +} + +vuint8mf2_t test_vandn_vv_u8mf2_tum(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { + return __riscv_vandn_vv_u8mf2_tum(vm, vd, vs2, vs1, vl); +} + +vuint8mf2_t test_vandn_vx_u8mf2_tum(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vandn_vx_u8mf2_tum(vm, vd, vs2, rs1, vl); +} + +vuint8m1_t test_vandn_vv_u8m1_tum(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vandn_vv_u8m1_tum(vm, vd, vs2, vs1, vl); +} + +vuint8m1_t test_vandn_vx_u8m1_tum(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vandn_vx_u8m1_tum(vm, vd, vs2, rs1, vl); +} + +vuint8m2_t test_vandn_vv_u8m2_tum(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) { + return __riscv_vandn_vv_u8m2_tum(vm, vd, vs2, vs1, vl); +} + +vuint8m2_t test_vandn_vx_u8m2_tum(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vandn_vx_u8m2_tum(vm, vd, vs2, rs1, vl); +} + +vuint8m4_t test_vandn_vv_u8m4_tum(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) { + return __riscv_vandn_vv_u8m4_tum(vm, vd, vs2, vs1, vl); +} + +vuint8m4_t test_vandn_vx_u8m4_tum(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vandn_vx_u8m4_tum(vm, vd, vs2, rs1, vl); +} + +vuint8m8_t test_vandn_vv_u8m8_tum(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) { + return __riscv_vandn_vv_u8m8_tum(vm, vd, vs2, vs1, vl); +} + 
+vuint8m8_t test_vandn_vx_u8m8_tum(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vandn_vx_u8m8_tum(vm, vd, vs2, rs1, vl); +} + +vuint16mf4_t test_vandn_vv_u16mf4_tum(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { + return __riscv_vandn_vv_u16mf4_tum(vm, vd, vs2, vs1, vl); +} + +vuint16mf4_t test_vandn_vx_u16mf4_tum(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vandn_vx_u16mf4_tum(vm, vd, vs2, rs1, vl); +} + +vuint16mf2_t test_vandn_vv_u16mf2_tum(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { + return __riscv_vandn_vv_u16mf2_tum(vm, vd, vs2, vs1, vl); +} + +vuint16mf2_t test_vandn_vx_u16mf2_tum(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vandn_vx_u16mf2_tum(vm, vd, vs2, rs1, vl); +} + +vuint16m1_t test_vandn_vv_u16m1_tum(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vandn_vv_u16m1_tum(vm, vd, vs2, vs1, vl); +} + +vuint16m1_t test_vandn_vx_u16m1_tum(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vandn_vx_u16m1_tum(vm, vd, vs2, rs1, vl); +} + +vuint16m2_t test_vandn_vv_u16m2_tum(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { + return __riscv_vandn_vv_u16m2_tum(vm, vd, vs2, vs1, vl); +} + +vuint16m2_t test_vandn_vx_u16m2_tum(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vandn_vx_u16m2_tum(vm, vd, vs2, rs1, vl); +} + +vuint16m4_t test_vandn_vv_u16m4_tum(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { + return __riscv_vandn_vv_u16m4_tum(vm, vd, vs2, vs1, vl); +} + +vuint16m4_t test_vandn_vx_u16m4_tum(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vandn_vx_u16m4_tum(vm, vd, vs2, rs1, vl); +} + +vuint16m8_t test_vandn_vv_u16m8_tum(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) { + return __riscv_vandn_vv_u16m8_tum(vm, vd, vs2, vs1, vl); +} + +vuint16m8_t test_vandn_vx_u16m8_tum(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vandn_vx_u16m8_tum(vm, vd, vs2, rs1, vl); +} + +vuint32mf2_t test_vandn_vv_u32mf2_tum(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { + return __riscv_vandn_vv_u32mf2_tum(vm, vd, vs2, vs1, vl); +} + +vuint32mf2_t test_vandn_vx_u32mf2_tum(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vandn_vx_u32mf2_tum(vm, vd, vs2, rs1, vl); +} + +vuint32m1_t test_vandn_vv_u32m1_tum(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vandn_vv_u32m1_tum(vm, vd, vs2, vs1, vl); +} + +vuint32m1_t test_vandn_vx_u32m1_tum(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vandn_vx_u32m1_tum(vm, vd, vs2, rs1, vl); +} + +vuint32m2_t test_vandn_vv_u32m2_tum(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { + return __riscv_vandn_vv_u32m2_tum(vm, vd, vs2, vs1, vl); +} + +vuint32m2_t test_vandn_vx_u32m2_tum(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vandn_vx_u32m2_tum(vm, vd, vs2, rs1, vl); +} + +vuint32m4_t test_vandn_vv_u32m4_tum(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { + return __riscv_vandn_vv_u32m4_tum(vm, vd, vs2, vs1, vl); +} + 
+vuint32m4_t test_vandn_vx_u32m4_tum(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vandn_vx_u32m4_tum(vm, vd, vs2, rs1, vl); +} + +vuint32m8_t test_vandn_vv_u32m8_tum(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) { + return __riscv_vandn_vv_u32m8_tum(vm, vd, vs2, vs1, vl); +} + +vuint32m8_t test_vandn_vx_u32m8_tum(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vandn_vx_u32m8_tum(vm, vd, vs2, rs1, vl); +} + +vuint64m1_t test_vandn_vv_u64m1_tum(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vandn_vv_u64m1_tum(vm, vd, vs2, vs1, vl); +} + +vuint64m1_t test_vandn_vx_u64m1_tum(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vandn_vx_u64m1_tum(vm, vd, vs2, rs1, vl); +} + +vuint64m2_t test_vandn_vv_u64m2_tum(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { + return __riscv_vandn_vv_u64m2_tum(vm, vd, vs2, vs1, vl); +} + +vuint64m2_t test_vandn_vx_u64m2_tum(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vandn_vx_u64m2_tum(vm, vd, vs2, rs1, vl); +} + +vuint64m4_t test_vandn_vv_u64m4_tum(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { + return __riscv_vandn_vv_u64m4_tum(vm, vd, vs2, vs1, vl); +} + +vuint64m4_t test_vandn_vx_u64m4_tum(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vandn_vx_u64m4_tum(vm, vd, vs2, rs1, vl); +} + +vuint64m8_t test_vandn_vv_u64m8_tum(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { + return __riscv_vandn_vv_u64m8_tum(vm, vd, vs2, vs1, vl); +} + +vuint64m8_t test_vandn_vx_u64m8_tum(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vandn_vx_u64m8_tum(vm, vd, vs2, rs1, vl); +} + +vuint8mf8_t test_vandn_vv_u8mf8_tumu(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { + return __riscv_vandn_vv_u8mf8_tumu(vm, vd, vs2, vs1, vl); +} + +vuint8mf8_t test_vandn_vx_u8mf8_tumu(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vandn_vx_u8mf8_tumu(vm, vd, vs2, rs1, vl); +} + +vuint8mf4_t test_vandn_vv_u8mf4_tumu(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { + return __riscv_vandn_vv_u8mf4_tumu(vm, vd, vs2, vs1, vl); +} + +vuint8mf4_t test_vandn_vx_u8mf4_tumu(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vandn_vx_u8mf4_tumu(vm, vd, vs2, rs1, vl); +} + +vuint8mf2_t test_vandn_vv_u8mf2_tumu(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { + return __riscv_vandn_vv_u8mf2_tumu(vm, vd, vs2, vs1, vl); +} + +vuint8mf2_t test_vandn_vx_u8mf2_tumu(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vandn_vx_u8mf2_tumu(vm, vd, vs2, rs1, vl); +} + +vuint8m1_t test_vandn_vv_u8m1_tumu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vandn_vv_u8m1_tumu(vm, vd, vs2, vs1, vl); +} + +vuint8m1_t test_vandn_vx_u8m1_tumu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vandn_vx_u8m1_tumu(vm, vd, vs2, rs1, vl); +} + +vuint8m2_t test_vandn_vv_u8m2_tumu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) { + return __riscv_vandn_vv_u8m2_tumu(vm, vd, vs2, vs1, vl); +} + +vuint8m2_t 
test_vandn_vx_u8m2_tumu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vandn_vx_u8m2_tumu(vm, vd, vs2, rs1, vl); +} + +vuint8m4_t test_vandn_vv_u8m4_tumu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) { + return __riscv_vandn_vv_u8m4_tumu(vm, vd, vs2, vs1, vl); +} + +vuint8m4_t test_vandn_vx_u8m4_tumu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vandn_vx_u8m4_tumu(vm, vd, vs2, rs1, vl); +} + +vuint8m8_t test_vandn_vv_u8m8_tumu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) { + return __riscv_vandn_vv_u8m8_tumu(vm, vd, vs2, vs1, vl); +} + +vuint8m8_t test_vandn_vx_u8m8_tumu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vandn_vx_u8m8_tumu(vm, vd, vs2, rs1, vl); +} + +vuint16mf4_t test_vandn_vv_u16mf4_tumu(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { + return __riscv_vandn_vv_u16mf4_tumu(vm, vd, vs2, vs1, vl); +} + +vuint16mf4_t test_vandn_vx_u16mf4_tumu(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vandn_vx_u16mf4_tumu(vm, vd, vs2, rs1, vl); +} + +vuint16mf2_t test_vandn_vv_u16mf2_tumu(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { + return __riscv_vandn_vv_u16mf2_tumu(vm, vd, vs2, vs1, vl); +} + +vuint16mf2_t test_vandn_vx_u16mf2_tumu(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vandn_vx_u16mf2_tumu(vm, vd, vs2, rs1, vl); +} + +vuint16m1_t test_vandn_vv_u16m1_tumu(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vandn_vv_u16m1_tumu(vm, vd, vs2, vs1, vl); +} + +vuint16m1_t test_vandn_vx_u16m1_tumu(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vandn_vx_u16m1_tumu(vm, vd, vs2, rs1, vl); +} + +vuint16m2_t test_vandn_vv_u16m2_tumu(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { + return __riscv_vandn_vv_u16m2_tumu(vm, vd, vs2, vs1, vl); +} + +vuint16m2_t test_vandn_vx_u16m2_tumu(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vandn_vx_u16m2_tumu(vm, vd, vs2, rs1, vl); +} + +vuint16m4_t test_vandn_vv_u16m4_tumu(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { + return __riscv_vandn_vv_u16m4_tumu(vm, vd, vs2, vs1, vl); +} + +vuint16m4_t test_vandn_vx_u16m4_tumu(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vandn_vx_u16m4_tumu(vm, vd, vs2, rs1, vl); +} + +vuint16m8_t test_vandn_vv_u16m8_tumu(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) { + return __riscv_vandn_vv_u16m8_tumu(vm, vd, vs2, vs1, vl); +} + +vuint16m8_t test_vandn_vx_u16m8_tumu(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vandn_vx_u16m8_tumu(vm, vd, vs2, rs1, vl); +} + +vuint32mf2_t test_vandn_vv_u32mf2_tumu(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { + return __riscv_vandn_vv_u32mf2_tumu(vm, vd, vs2, vs1, vl); +} + +vuint32mf2_t test_vandn_vx_u32mf2_tumu(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vandn_vx_u32mf2_tumu(vm, vd, vs2, rs1, vl); +} + +vuint32m1_t test_vandn_vv_u32m1_tumu(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vandn_vv_u32m1_tumu(vm, vd, vs2, vs1, vl); +} + 
+vuint32m1_t test_vandn_vx_u32m1_tumu(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vandn_vx_u32m1_tumu(vm, vd, vs2, rs1, vl); +} + +vuint32m2_t test_vandn_vv_u32m2_tumu(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { + return __riscv_vandn_vv_u32m2_tumu(vm, vd, vs2, vs1, vl); +} + +vuint32m2_t test_vandn_vx_u32m2_tumu(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vandn_vx_u32m2_tumu(vm, vd, vs2, rs1, vl); +} + +vuint32m4_t test_vandn_vv_u32m4_tumu(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { + return __riscv_vandn_vv_u32m4_tumu(vm, vd, vs2, vs1, vl); +} + +vuint32m4_t test_vandn_vx_u32m4_tumu(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vandn_vx_u32m4_tumu(vm, vd, vs2, rs1, vl); +} + +vuint32m8_t test_vandn_vv_u32m8_tumu(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) { + return __riscv_vandn_vv_u32m8_tumu(vm, vd, vs2, vs1, vl); +} + +vuint32m8_t test_vandn_vx_u32m8_tumu(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vandn_vx_u32m8_tumu(vm, vd, vs2, rs1, vl); +} + +vuint64m1_t test_vandn_vv_u64m1_tumu(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vandn_vv_u64m1_tumu(vm, vd, vs2, vs1, vl); +} + +vuint64m1_t test_vandn_vx_u64m1_tumu(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vandn_vx_u64m1_tumu(vm, vd, vs2, rs1, vl); +} + +vuint64m2_t test_vandn_vv_u64m2_tumu(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { + return __riscv_vandn_vv_u64m2_tumu(vm, vd, vs2, vs1, vl); +} + +vuint64m2_t test_vandn_vx_u64m2_tumu(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vandn_vx_u64m2_tumu(vm, vd, vs2, rs1, vl); +} + +vuint64m4_t test_vandn_vv_u64m4_tumu(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { + return __riscv_vandn_vv_u64m4_tumu(vm, vd, vs2, vs1, vl); +} + +vuint64m4_t test_vandn_vx_u64m4_tumu(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vandn_vx_u64m4_tumu(vm, vd, vs2, rs1, vl); +} + +vuint64m8_t test_vandn_vv_u64m8_tumu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { + return __riscv_vandn_vv_u64m8_tumu(vm, vd, vs2, vs1, vl); +} + +vuint64m8_t test_vandn_vx_u64m8_tumu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vandn_vx_u64m8_tumu(vm, vd, vs2, rs1, vl); +} + +vuint8mf8_t test_vandn_vv_u8mf8_mu(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { + return __riscv_vandn_vv_u8mf8_mu(vm, vd, vs2, vs1, vl); +} + +vuint8mf8_t test_vandn_vx_u8mf8_mu(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vandn_vx_u8mf8_mu(vm, vd, vs2, rs1, vl); +} + +vuint8mf4_t test_vandn_vv_u8mf4_mu(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { + return __riscv_vandn_vv_u8mf4_mu(vm, vd, vs2, vs1, vl); +} + +vuint8mf4_t test_vandn_vx_u8mf4_mu(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vandn_vx_u8mf4_mu(vm, vd, vs2, rs1, vl); +} + +vuint8mf2_t test_vandn_vv_u8mf2_mu(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { + return __riscv_vandn_vv_u8mf2_mu(vm, vd, vs2, vs1, vl); +} + +vuint8mf2_t 
test_vandn_vx_u8mf2_mu(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vandn_vx_u8mf2_mu(vm, vd, vs2, rs1, vl); +} + +vuint8m1_t test_vandn_vv_u8m1_mu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vandn_vv_u8m1_mu(vm, vd, vs2, vs1, vl); +} + +vuint8m1_t test_vandn_vx_u8m1_mu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vandn_vx_u8m1_mu(vm, vd, vs2, rs1, vl); +} + +vuint8m2_t test_vandn_vv_u8m2_mu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) { + return __riscv_vandn_vv_u8m2_mu(vm, vd, vs2, vs1, vl); +} + +vuint8m2_t test_vandn_vx_u8m2_mu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vandn_vx_u8m2_mu(vm, vd, vs2, rs1, vl); +} + +vuint8m4_t test_vandn_vv_u8m4_mu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) { + return __riscv_vandn_vv_u8m4_mu(vm, vd, vs2, vs1, vl); +} + +vuint8m4_t test_vandn_vx_u8m4_mu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vandn_vx_u8m4_mu(vm, vd, vs2, rs1, vl); +} + +vuint8m8_t test_vandn_vv_u8m8_mu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) { + return __riscv_vandn_vv_u8m8_mu(vm, vd, vs2, vs1, vl); +} + +vuint8m8_t test_vandn_vx_u8m8_mu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vandn_vx_u8m8_mu(vm, vd, vs2, rs1, vl); +} + +vuint16mf4_t test_vandn_vv_u16mf4_mu(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { + return __riscv_vandn_vv_u16mf4_mu(vm, vd, vs2, vs1, vl); +} + +vuint16mf4_t test_vandn_vx_u16mf4_mu(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vandn_vx_u16mf4_mu(vm, vd, vs2, rs1, vl); +} + +vuint16mf2_t test_vandn_vv_u16mf2_mu(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { + return __riscv_vandn_vv_u16mf2_mu(vm, vd, vs2, vs1, vl); +} + +vuint16mf2_t test_vandn_vx_u16mf2_mu(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vandn_vx_u16mf2_mu(vm, vd, vs2, rs1, vl); +} + +vuint16m1_t test_vandn_vv_u16m1_mu(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vandn_vv_u16m1_mu(vm, vd, vs2, vs1, vl); +} + +vuint16m1_t test_vandn_vx_u16m1_mu(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vandn_vx_u16m1_mu(vm, vd, vs2, rs1, vl); +} + +vuint16m2_t test_vandn_vv_u16m2_mu(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { + return __riscv_vandn_vv_u16m2_mu(vm, vd, vs2, vs1, vl); +} + +vuint16m2_t test_vandn_vx_u16m2_mu(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vandn_vx_u16m2_mu(vm, vd, vs2, rs1, vl); +} + +vuint16m4_t test_vandn_vv_u16m4_mu(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { + return __riscv_vandn_vv_u16m4_mu(vm, vd, vs2, vs1, vl); +} + +vuint16m4_t test_vandn_vx_u16m4_mu(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vandn_vx_u16m4_mu(vm, vd, vs2, rs1, vl); +} + +vuint16m8_t test_vandn_vv_u16m8_mu(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) { + return __riscv_vandn_vv_u16m8_mu(vm, vd, vs2, vs1, vl); +} + +vuint16m8_t test_vandn_vx_u16m8_mu(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, uint16_t rs1, size_t vl) { + return 
__riscv_vandn_vx_u16m8_mu(vm, vd, vs2, rs1, vl); +} + +vuint32mf2_t test_vandn_vv_u32mf2_mu(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { + return __riscv_vandn_vv_u32mf2_mu(vm, vd, vs2, vs1, vl); +} + +vuint32mf2_t test_vandn_vx_u32mf2_mu(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vandn_vx_u32mf2_mu(vm, vd, vs2, rs1, vl); +} + +vuint32m1_t test_vandn_vv_u32m1_mu(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vandn_vv_u32m1_mu(vm, vd, vs2, vs1, vl); +} + +vuint32m1_t test_vandn_vx_u32m1_mu(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vandn_vx_u32m1_mu(vm, vd, vs2, rs1, vl); +} + +vuint32m2_t test_vandn_vv_u32m2_mu(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { + return __riscv_vandn_vv_u32m2_mu(vm, vd, vs2, vs1, vl); +} + +vuint32m2_t test_vandn_vx_u32m2_mu(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vandn_vx_u32m2_mu(vm, vd, vs2, rs1, vl); +} + +vuint32m4_t test_vandn_vv_u32m4_mu(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { + return __riscv_vandn_vv_u32m4_mu(vm, vd, vs2, vs1, vl); +} + +vuint32m4_t test_vandn_vx_u32m4_mu(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vandn_vx_u32m4_mu(vm, vd, vs2, rs1, vl); +} + +vuint32m8_t test_vandn_vv_u32m8_mu(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) { + return __riscv_vandn_vv_u32m8_mu(vm, vd, vs2, vs1, vl); +} + +vuint32m8_t test_vandn_vx_u32m8_mu(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vandn_vx_u32m8_mu(vm, vd, vs2, rs1, vl); +} + +vuint64m1_t test_vandn_vv_u64m1_mu(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vandn_vv_u64m1_mu(vm, vd, vs2, vs1, vl); +} + +vuint64m1_t test_vandn_vx_u64m1_mu(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vandn_vx_u64m1_mu(vm, vd, vs2, rs1, vl); +} + +vuint64m2_t test_vandn_vv_u64m2_mu(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { + return __riscv_vandn_vv_u64m2_mu(vm, vd, vs2, vs1, vl); +} + +vuint64m2_t test_vandn_vx_u64m2_mu(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vandn_vx_u64m2_mu(vm, vd, vs2, rs1, vl); +} + +vuint64m4_t test_vandn_vv_u64m4_mu(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { + return __riscv_vandn_vv_u64m4_mu(vm, vd, vs2, vs1, vl); +} + +vuint64m4_t test_vandn_vx_u64m4_mu(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vandn_vx_u64m4_mu(vm, vd, vs2, rs1, vl); +} + +vuint64m8_t test_vandn_vv_u64m8_mu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { + return __riscv_vandn_vv_u64m8_mu(vm, vd, vs2, vs1, vl); +} + +vuint64m8_t test_vandn_vx_u64m8_mu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vandn_vx_u64m8_mu(vm, vd, vs2, rs1, vl); +} diff --git a/auto-generated/vector-crypto/policy_funcs/llvm-api-tests/vbrev.c b/auto-generated/vector-crypto/policy_funcs/llvm-api-tests/vbrev.c new file mode 100644 index 000000000..1faa2260e --- /dev/null +++ b/auto-generated/vector-crypto/policy_funcs/llvm-api-tests/vbrev.c @@ -0,0 +1,365 @@ +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 
-triple riscv64 -target-feature +v -target-feature +zvl512b \ +// RUN: -target-feature +zvbb \ +// RUN: -target-feature +zvbc \ +// RUN: -target-feature +zvkg \ +// RUN: -target-feature +zvkned \ +// RUN: -target-feature +zvknhb \ +// RUN: -target-feature +zvksed \ +// RUN: -target-feature +zvksh -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include <riscv_vector.h> + +vuint8mf8_t test_vbrev_v_u8mf8_tu(vuint8mf8_t vd, vuint8mf8_t vs2, size_t vl) { + return __riscv_vbrev_v_u8mf8_tu(vd, vs2, vl); +} + +vuint8mf4_t test_vbrev_v_u8mf4_tu(vuint8mf4_t vd, vuint8mf4_t vs2, size_t vl) { + return __riscv_vbrev_v_u8mf4_tu(vd, vs2, vl); +} + +vuint8mf2_t test_vbrev_v_u8mf2_tu(vuint8mf2_t vd, vuint8mf2_t vs2, size_t vl) { + return __riscv_vbrev_v_u8mf2_tu(vd, vs2, vl); +} + +vuint8m1_t test_vbrev_v_u8m1_tu(vuint8m1_t vd, vuint8m1_t vs2, size_t vl) { + return __riscv_vbrev_v_u8m1_tu(vd, vs2, vl); +} + +vuint8m2_t test_vbrev_v_u8m2_tu(vuint8m2_t vd, vuint8m2_t vs2, size_t vl) { + return __riscv_vbrev_v_u8m2_tu(vd, vs2, vl); +} + +vuint8m4_t test_vbrev_v_u8m4_tu(vuint8m4_t vd, vuint8m4_t vs2, size_t vl) { + return __riscv_vbrev_v_u8m4_tu(vd, vs2, vl); +} + +vuint8m8_t test_vbrev_v_u8m8_tu(vuint8m8_t vd, vuint8m8_t vs2, size_t vl) { + return __riscv_vbrev_v_u8m8_tu(vd, vs2, vl); +} + +vuint16mf4_t test_vbrev_v_u16mf4_tu(vuint16mf4_t vd, vuint16mf4_t vs2, size_t vl) { + return __riscv_vbrev_v_u16mf4_tu(vd, vs2, vl); +} + +vuint16mf2_t test_vbrev_v_u16mf2_tu(vuint16mf2_t vd, vuint16mf2_t vs2, size_t vl) { + return __riscv_vbrev_v_u16mf2_tu(vd, vs2, vl); +} + +vuint16m1_t test_vbrev_v_u16m1_tu(vuint16m1_t vd, vuint16m1_t vs2, size_t vl) { + return __riscv_vbrev_v_u16m1_tu(vd, vs2, vl); +} + +vuint16m2_t test_vbrev_v_u16m2_tu(vuint16m2_t vd, vuint16m2_t vs2, size_t vl) { + return __riscv_vbrev_v_u16m2_tu(vd, vs2, vl); +} + +vuint16m4_t test_vbrev_v_u16m4_tu(vuint16m4_t vd, vuint16m4_t vs2, size_t vl) { + return __riscv_vbrev_v_u16m4_tu(vd, vs2, vl); +} + +vuint16m8_t test_vbrev_v_u16m8_tu(vuint16m8_t vd, vuint16m8_t vs2, size_t vl) { + return __riscv_vbrev_v_u16m8_tu(vd, vs2, vl); +} + +vuint32mf2_t test_vbrev_v_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) { + return __riscv_vbrev_v_u32mf2_tu(vd, vs2, vl); +} + +vuint32m1_t test_vbrev_v_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vbrev_v_u32m1_tu(vd, vs2, vl); +} + +vuint32m2_t test_vbrev_v_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vbrev_v_u32m2_tu(vd, vs2, vl); +} + +vuint32m4_t test_vbrev_v_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vbrev_v_u32m4_tu(vd, vs2, vl); +} + +vuint32m8_t test_vbrev_v_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { + return __riscv_vbrev_v_u32m8_tu(vd, vs2, vl); +} + +vuint64m1_t test_vbrev_v_u64m1_tu(vuint64m1_t vd, vuint64m1_t vs2, size_t vl) { + return __riscv_vbrev_v_u64m1_tu(vd, vs2, vl); +} + +vuint64m2_t test_vbrev_v_u64m2_tu(vuint64m2_t vd, vuint64m2_t vs2, size_t vl) { + return __riscv_vbrev_v_u64m2_tu(vd, vs2, vl); +} + +vuint64m4_t test_vbrev_v_u64m4_tu(vuint64m4_t vd, vuint64m4_t vs2, size_t vl) { + return __riscv_vbrev_v_u64m4_tu(vd, vs2, vl); +} + +vuint64m8_t test_vbrev_v_u64m8_tu(vuint64m8_t vd, vuint64m8_t vs2, size_t vl) { + return __riscv_vbrev_v_u64m8_tu(vd, vs2, vl); +} + +vuint8mf8_t test_vbrev_v_u8mf8_tum(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, size_t vl) { + return __riscv_vbrev_v_u8mf8_tum(vm, vd, vs2, vl); +} + +vuint8mf4_t 
test_vbrev_v_u8mf4_tum(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, size_t vl) { + return __riscv_vbrev_v_u8mf4_tum(vm, vd, vs2, vl); +} + +vuint8mf2_t test_vbrev_v_u8mf2_tum(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, size_t vl) { + return __riscv_vbrev_v_u8mf2_tum(vm, vd, vs2, vl); +} + +vuint8m1_t test_vbrev_v_u8m1_tum(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, size_t vl) { + return __riscv_vbrev_v_u8m1_tum(vm, vd, vs2, vl); +} + +vuint8m2_t test_vbrev_v_u8m2_tum(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, size_t vl) { + return __riscv_vbrev_v_u8m2_tum(vm, vd, vs2, vl); +} + +vuint8m4_t test_vbrev_v_u8m4_tum(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, size_t vl) { + return __riscv_vbrev_v_u8m4_tum(vm, vd, vs2, vl); +} + +vuint8m8_t test_vbrev_v_u8m8_tum(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, size_t vl) { + return __riscv_vbrev_v_u8m8_tum(vm, vd, vs2, vl); +} + +vuint16mf4_t test_vbrev_v_u16mf4_tum(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, size_t vl) { + return __riscv_vbrev_v_u16mf4_tum(vm, vd, vs2, vl); +} + +vuint16mf2_t test_vbrev_v_u16mf2_tum(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, size_t vl) { + return __riscv_vbrev_v_u16mf2_tum(vm, vd, vs2, vl); +} + +vuint16m1_t test_vbrev_v_u16m1_tum(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, size_t vl) { + return __riscv_vbrev_v_u16m1_tum(vm, vd, vs2, vl); +} + +vuint16m2_t test_vbrev_v_u16m2_tum(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, size_t vl) { + return __riscv_vbrev_v_u16m2_tum(vm, vd, vs2, vl); +} + +vuint16m4_t test_vbrev_v_u16m4_tum(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, size_t vl) { + return __riscv_vbrev_v_u16m4_tum(vm, vd, vs2, vl); +} + +vuint16m8_t test_vbrev_v_u16m8_tum(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, size_t vl) { + return __riscv_vbrev_v_u16m8_tum(vm, vd, vs2, vl); +} + +vuint32mf2_t test_vbrev_v_u32mf2_tum(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) { + return __riscv_vbrev_v_u32mf2_tum(vm, vd, vs2, vl); +} + +vuint32m1_t test_vbrev_v_u32m1_tum(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vbrev_v_u32m1_tum(vm, vd, vs2, vl); +} + +vuint32m2_t test_vbrev_v_u32m2_tum(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vbrev_v_u32m2_tum(vm, vd, vs2, vl); +} + +vuint32m4_t test_vbrev_v_u32m4_tum(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vbrev_v_u32m4_tum(vm, vd, vs2, vl); +} + +vuint32m8_t test_vbrev_v_u32m8_tum(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { + return __riscv_vbrev_v_u32m8_tum(vm, vd, vs2, vl); +} + +vuint64m1_t test_vbrev_v_u64m1_tum(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, size_t vl) { + return __riscv_vbrev_v_u64m1_tum(vm, vd, vs2, vl); +} + +vuint64m2_t test_vbrev_v_u64m2_tum(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, size_t vl) { + return __riscv_vbrev_v_u64m2_tum(vm, vd, vs2, vl); +} + +vuint64m4_t test_vbrev_v_u64m4_tum(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, size_t vl) { + return __riscv_vbrev_v_u64m4_tum(vm, vd, vs2, vl); +} + +vuint64m8_t test_vbrev_v_u64m8_tum(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, size_t vl) { + return __riscv_vbrev_v_u64m8_tum(vm, vd, vs2, vl); +} + +vuint8mf8_t test_vbrev_v_u8mf8_tumu(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, size_t vl) { + return __riscv_vbrev_v_u8mf8_tumu(vm, vd, vs2, vl); +} + +vuint8mf4_t test_vbrev_v_u8mf4_tumu(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, size_t vl) { + return __riscv_vbrev_v_u8mf4_tumu(vm, vd, vs2, vl); +} + +vuint8mf2_t 
test_vbrev_v_u8mf2_tumu(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, size_t vl) { + return __riscv_vbrev_v_u8mf2_tumu(vm, vd, vs2, vl); +} + +vuint8m1_t test_vbrev_v_u8m1_tumu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, size_t vl) { + return __riscv_vbrev_v_u8m1_tumu(vm, vd, vs2, vl); +} + +vuint8m2_t test_vbrev_v_u8m2_tumu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, size_t vl) { + return __riscv_vbrev_v_u8m2_tumu(vm, vd, vs2, vl); +} + +vuint8m4_t test_vbrev_v_u8m4_tumu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, size_t vl) { + return __riscv_vbrev_v_u8m4_tumu(vm, vd, vs2, vl); +} + +vuint8m8_t test_vbrev_v_u8m8_tumu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, size_t vl) { + return __riscv_vbrev_v_u8m8_tumu(vm, vd, vs2, vl); +} + +vuint16mf4_t test_vbrev_v_u16mf4_tumu(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, size_t vl) { + return __riscv_vbrev_v_u16mf4_tumu(vm, vd, vs2, vl); +} + +vuint16mf2_t test_vbrev_v_u16mf2_tumu(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, size_t vl) { + return __riscv_vbrev_v_u16mf2_tumu(vm, vd, vs2, vl); +} + +vuint16m1_t test_vbrev_v_u16m1_tumu(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, size_t vl) { + return __riscv_vbrev_v_u16m1_tumu(vm, vd, vs2, vl); +} + +vuint16m2_t test_vbrev_v_u16m2_tumu(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, size_t vl) { + return __riscv_vbrev_v_u16m2_tumu(vm, vd, vs2, vl); +} + +vuint16m4_t test_vbrev_v_u16m4_tumu(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, size_t vl) { + return __riscv_vbrev_v_u16m4_tumu(vm, vd, vs2, vl); +} + +vuint16m8_t test_vbrev_v_u16m8_tumu(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, size_t vl) { + return __riscv_vbrev_v_u16m8_tumu(vm, vd, vs2, vl); +} + +vuint32mf2_t test_vbrev_v_u32mf2_tumu(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) { + return __riscv_vbrev_v_u32mf2_tumu(vm, vd, vs2, vl); +} + +vuint32m1_t test_vbrev_v_u32m1_tumu(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vbrev_v_u32m1_tumu(vm, vd, vs2, vl); +} + +vuint32m2_t test_vbrev_v_u32m2_tumu(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vbrev_v_u32m2_tumu(vm, vd, vs2, vl); +} + +vuint32m4_t test_vbrev_v_u32m4_tumu(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vbrev_v_u32m4_tumu(vm, vd, vs2, vl); +} + +vuint32m8_t test_vbrev_v_u32m8_tumu(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { + return __riscv_vbrev_v_u32m8_tumu(vm, vd, vs2, vl); +} + +vuint64m1_t test_vbrev_v_u64m1_tumu(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, size_t vl) { + return __riscv_vbrev_v_u64m1_tumu(vm, vd, vs2, vl); +} + +vuint64m2_t test_vbrev_v_u64m2_tumu(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, size_t vl) { + return __riscv_vbrev_v_u64m2_tumu(vm, vd, vs2, vl); +} + +vuint64m4_t test_vbrev_v_u64m4_tumu(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, size_t vl) { + return __riscv_vbrev_v_u64m4_tumu(vm, vd, vs2, vl); +} + +vuint64m8_t test_vbrev_v_u64m8_tumu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, size_t vl) { + return __riscv_vbrev_v_u64m8_tumu(vm, vd, vs2, vl); +} + +vuint8mf8_t test_vbrev_v_u8mf8_mu(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, size_t vl) { + return __riscv_vbrev_v_u8mf8_mu(vm, vd, vs2, vl); +} + +vuint8mf4_t test_vbrev_v_u8mf4_mu(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, size_t vl) { + return __riscv_vbrev_v_u8mf4_mu(vm, vd, vs2, vl); +} + +vuint8mf2_t test_vbrev_v_u8mf2_mu(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, size_t vl) { + return __riscv_vbrev_v_u8mf2_mu(vm, vd, vs2, vl); +} + 
+vuint8m1_t test_vbrev_v_u8m1_mu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, size_t vl) { + return __riscv_vbrev_v_u8m1_mu(vm, vd, vs2, vl); +} + +vuint8m2_t test_vbrev_v_u8m2_mu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, size_t vl) { + return __riscv_vbrev_v_u8m2_mu(vm, vd, vs2, vl); +} + +vuint8m4_t test_vbrev_v_u8m4_mu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, size_t vl) { + return __riscv_vbrev_v_u8m4_mu(vm, vd, vs2, vl); +} + +vuint8m8_t test_vbrev_v_u8m8_mu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, size_t vl) { + return __riscv_vbrev_v_u8m8_mu(vm, vd, vs2, vl); +} + +vuint16mf4_t test_vbrev_v_u16mf4_mu(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, size_t vl) { + return __riscv_vbrev_v_u16mf4_mu(vm, vd, vs2, vl); +} + +vuint16mf2_t test_vbrev_v_u16mf2_mu(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, size_t vl) { + return __riscv_vbrev_v_u16mf2_mu(vm, vd, vs2, vl); +} + +vuint16m1_t test_vbrev_v_u16m1_mu(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, size_t vl) { + return __riscv_vbrev_v_u16m1_mu(vm, vd, vs2, vl); +} + +vuint16m2_t test_vbrev_v_u16m2_mu(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, size_t vl) { + return __riscv_vbrev_v_u16m2_mu(vm, vd, vs2, vl); +} + +vuint16m4_t test_vbrev_v_u16m4_mu(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, size_t vl) { + return __riscv_vbrev_v_u16m4_mu(vm, vd, vs2, vl); +} + +vuint16m8_t test_vbrev_v_u16m8_mu(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, size_t vl) { + return __riscv_vbrev_v_u16m8_mu(vm, vd, vs2, vl); +} + +vuint32mf2_t test_vbrev_v_u32mf2_mu(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) { + return __riscv_vbrev_v_u32mf2_mu(vm, vd, vs2, vl); +} + +vuint32m1_t test_vbrev_v_u32m1_mu(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vbrev_v_u32m1_mu(vm, vd, vs2, vl); +} + +vuint32m2_t test_vbrev_v_u32m2_mu(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vbrev_v_u32m2_mu(vm, vd, vs2, vl); +} + +vuint32m4_t test_vbrev_v_u32m4_mu(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vbrev_v_u32m4_mu(vm, vd, vs2, vl); +} + +vuint32m8_t test_vbrev_v_u32m8_mu(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { + return __riscv_vbrev_v_u32m8_mu(vm, vd, vs2, vl); +} + +vuint64m1_t test_vbrev_v_u64m1_mu(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, size_t vl) { + return __riscv_vbrev_v_u64m1_mu(vm, vd, vs2, vl); +} + +vuint64m2_t test_vbrev_v_u64m2_mu(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, size_t vl) { + return __riscv_vbrev_v_u64m2_mu(vm, vd, vs2, vl); +} + +vuint64m4_t test_vbrev_v_u64m4_mu(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, size_t vl) { + return __riscv_vbrev_v_u64m4_mu(vm, vd, vs2, vl); +} + +vuint64m8_t test_vbrev_v_u64m8_mu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, size_t vl) { + return __riscv_vbrev_v_u64m8_mu(vm, vd, vs2, vl); +} diff --git a/auto-generated/vector-crypto/policy_funcs/llvm-api-tests/vbrev8.c b/auto-generated/vector-crypto/policy_funcs/llvm-api-tests/vbrev8.c new file mode 100644 index 000000000..737992ff9 --- /dev/null +++ b/auto-generated/vector-crypto/policy_funcs/llvm-api-tests/vbrev8.c @@ -0,0 +1,365 @@ +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zvl512b \ +// RUN: -target-feature +zvbb \ +// RUN: -target-feature +zvbc \ +// RUN: -target-feature +zvkg \ +// RUN: -target-feature +zvkned \ +// RUN: -target-feature +zvknhb \ +// RUN: -target-feature +zvksed \ +// RUN: -target-feature +zvksh -disable-O0-optnone \ +// 
RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include <riscv_vector.h> + +vuint8mf8_t test_vbrev8_v_u8mf8_tu(vuint8mf8_t vd, vuint8mf8_t vs2, size_t vl) { + return __riscv_vbrev8_v_u8mf8_tu(vd, vs2, vl); +} + +vuint8mf4_t test_vbrev8_v_u8mf4_tu(vuint8mf4_t vd, vuint8mf4_t vs2, size_t vl) { + return __riscv_vbrev8_v_u8mf4_tu(vd, vs2, vl); +} + +vuint8mf2_t test_vbrev8_v_u8mf2_tu(vuint8mf2_t vd, vuint8mf2_t vs2, size_t vl) { + return __riscv_vbrev8_v_u8mf2_tu(vd, vs2, vl); +} + +vuint8m1_t test_vbrev8_v_u8m1_tu(vuint8m1_t vd, vuint8m1_t vs2, size_t vl) { + return __riscv_vbrev8_v_u8m1_tu(vd, vs2, vl); +} + +vuint8m2_t test_vbrev8_v_u8m2_tu(vuint8m2_t vd, vuint8m2_t vs2, size_t vl) { + return __riscv_vbrev8_v_u8m2_tu(vd, vs2, vl); +} + +vuint8m4_t test_vbrev8_v_u8m4_tu(vuint8m4_t vd, vuint8m4_t vs2, size_t vl) { + return __riscv_vbrev8_v_u8m4_tu(vd, vs2, vl); +} + +vuint8m8_t test_vbrev8_v_u8m8_tu(vuint8m8_t vd, vuint8m8_t vs2, size_t vl) { + return __riscv_vbrev8_v_u8m8_tu(vd, vs2, vl); +} + +vuint16mf4_t test_vbrev8_v_u16mf4_tu(vuint16mf4_t vd, vuint16mf4_t vs2, size_t vl) { + return __riscv_vbrev8_v_u16mf4_tu(vd, vs2, vl); +} + +vuint16mf2_t test_vbrev8_v_u16mf2_tu(vuint16mf2_t vd, vuint16mf2_t vs2, size_t vl) { + return __riscv_vbrev8_v_u16mf2_tu(vd, vs2, vl); +} + +vuint16m1_t test_vbrev8_v_u16m1_tu(vuint16m1_t vd, vuint16m1_t vs2, size_t vl) { + return __riscv_vbrev8_v_u16m1_tu(vd, vs2, vl); +} + +vuint16m2_t test_vbrev8_v_u16m2_tu(vuint16m2_t vd, vuint16m2_t vs2, size_t vl) { + return __riscv_vbrev8_v_u16m2_tu(vd, vs2, vl); +} + +vuint16m4_t test_vbrev8_v_u16m4_tu(vuint16m4_t vd, vuint16m4_t vs2, size_t vl) { + return __riscv_vbrev8_v_u16m4_tu(vd, vs2, vl); +} + +vuint16m8_t test_vbrev8_v_u16m8_tu(vuint16m8_t vd, vuint16m8_t vs2, size_t vl) { + return __riscv_vbrev8_v_u16m8_tu(vd, vs2, vl); +} + +vuint32mf2_t test_vbrev8_v_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) { + return __riscv_vbrev8_v_u32mf2_tu(vd, vs2, vl); +} + +vuint32m1_t test_vbrev8_v_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vbrev8_v_u32m1_tu(vd, vs2, vl); +} + +vuint32m2_t test_vbrev8_v_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vbrev8_v_u32m2_tu(vd, vs2, vl); +} + +vuint32m4_t test_vbrev8_v_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vbrev8_v_u32m4_tu(vd, vs2, vl); +} + +vuint32m8_t test_vbrev8_v_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { + return __riscv_vbrev8_v_u32m8_tu(vd, vs2, vl); +} + +vuint64m1_t test_vbrev8_v_u64m1_tu(vuint64m1_t vd, vuint64m1_t vs2, size_t vl) { + return __riscv_vbrev8_v_u64m1_tu(vd, vs2, vl); +} + +vuint64m2_t test_vbrev8_v_u64m2_tu(vuint64m2_t vd, vuint64m2_t vs2, size_t vl) { + return __riscv_vbrev8_v_u64m2_tu(vd, vs2, vl); +} + +vuint64m4_t test_vbrev8_v_u64m4_tu(vuint64m4_t vd, vuint64m4_t vs2, size_t vl) { + return __riscv_vbrev8_v_u64m4_tu(vd, vs2, vl); +} + +vuint64m8_t test_vbrev8_v_u64m8_tu(vuint64m8_t vd, vuint64m8_t vs2, size_t vl) { + return __riscv_vbrev8_v_u64m8_tu(vd, vs2, vl); +} + +vuint8mf8_t test_vbrev8_v_u8mf8_tum(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, size_t vl) { + return __riscv_vbrev8_v_u8mf8_tum(vm, vd, vs2, vl); +} + +vuint8mf4_t test_vbrev8_v_u8mf4_tum(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, size_t vl) { + return __riscv_vbrev8_v_u8mf4_tum(vm, vd, vs2, vl); +} + +vuint8mf2_t test_vbrev8_v_u8mf2_tum(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, size_t vl) { + return __riscv_vbrev8_v_u8mf2_tum(vm, vd, 
vs2, vl); +} + +vuint8m1_t test_vbrev8_v_u8m1_tum(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, size_t vl) { + return __riscv_vbrev8_v_u8m1_tum(vm, vd, vs2, vl); +} + +vuint8m2_t test_vbrev8_v_u8m2_tum(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, size_t vl) { + return __riscv_vbrev8_v_u8m2_tum(vm, vd, vs2, vl); +} + +vuint8m4_t test_vbrev8_v_u8m4_tum(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, size_t vl) { + return __riscv_vbrev8_v_u8m4_tum(vm, vd, vs2, vl); +} + +vuint8m8_t test_vbrev8_v_u8m8_tum(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, size_t vl) { + return __riscv_vbrev8_v_u8m8_tum(vm, vd, vs2, vl); +} + +vuint16mf4_t test_vbrev8_v_u16mf4_tum(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, size_t vl) { + return __riscv_vbrev8_v_u16mf4_tum(vm, vd, vs2, vl); +} + +vuint16mf2_t test_vbrev8_v_u16mf2_tum(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, size_t vl) { + return __riscv_vbrev8_v_u16mf2_tum(vm, vd, vs2, vl); +} + +vuint16m1_t test_vbrev8_v_u16m1_tum(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, size_t vl) { + return __riscv_vbrev8_v_u16m1_tum(vm, vd, vs2, vl); +} + +vuint16m2_t test_vbrev8_v_u16m2_tum(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, size_t vl) { + return __riscv_vbrev8_v_u16m2_tum(vm, vd, vs2, vl); +} + +vuint16m4_t test_vbrev8_v_u16m4_tum(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, size_t vl) { + return __riscv_vbrev8_v_u16m4_tum(vm, vd, vs2, vl); +} + +vuint16m8_t test_vbrev8_v_u16m8_tum(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, size_t vl) { + return __riscv_vbrev8_v_u16m8_tum(vm, vd, vs2, vl); +} + +vuint32mf2_t test_vbrev8_v_u32mf2_tum(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) { + return __riscv_vbrev8_v_u32mf2_tum(vm, vd, vs2, vl); +} + +vuint32m1_t test_vbrev8_v_u32m1_tum(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vbrev8_v_u32m1_tum(vm, vd, vs2, vl); +} + +vuint32m2_t test_vbrev8_v_u32m2_tum(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vbrev8_v_u32m2_tum(vm, vd, vs2, vl); +} + +vuint32m4_t test_vbrev8_v_u32m4_tum(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vbrev8_v_u32m4_tum(vm, vd, vs2, vl); +} + +vuint32m8_t test_vbrev8_v_u32m8_tum(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { + return __riscv_vbrev8_v_u32m8_tum(vm, vd, vs2, vl); +} + +vuint64m1_t test_vbrev8_v_u64m1_tum(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, size_t vl) { + return __riscv_vbrev8_v_u64m1_tum(vm, vd, vs2, vl); +} + +vuint64m2_t test_vbrev8_v_u64m2_tum(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, size_t vl) { + return __riscv_vbrev8_v_u64m2_tum(vm, vd, vs2, vl); +} + +vuint64m4_t test_vbrev8_v_u64m4_tum(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, size_t vl) { + return __riscv_vbrev8_v_u64m4_tum(vm, vd, vs2, vl); +} + +vuint64m8_t test_vbrev8_v_u64m8_tum(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, size_t vl) { + return __riscv_vbrev8_v_u64m8_tum(vm, vd, vs2, vl); +} + +vuint8mf8_t test_vbrev8_v_u8mf8_tumu(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, size_t vl) { + return __riscv_vbrev8_v_u8mf8_tumu(vm, vd, vs2, vl); +} + +vuint8mf4_t test_vbrev8_v_u8mf4_tumu(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, size_t vl) { + return __riscv_vbrev8_v_u8mf4_tumu(vm, vd, vs2, vl); +} + +vuint8mf2_t test_vbrev8_v_u8mf2_tumu(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, size_t vl) { + return __riscv_vbrev8_v_u8mf2_tumu(vm, vd, vs2, vl); +} + +vuint8m1_t test_vbrev8_v_u8m1_tumu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, size_t vl) { + return 
__riscv_vbrev8_v_u8m1_tumu(vm, vd, vs2, vl); +} + +vuint8m2_t test_vbrev8_v_u8m2_tumu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, size_t vl) { + return __riscv_vbrev8_v_u8m2_tumu(vm, vd, vs2, vl); +} + +vuint8m4_t test_vbrev8_v_u8m4_tumu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, size_t vl) { + return __riscv_vbrev8_v_u8m4_tumu(vm, vd, vs2, vl); +} + +vuint8m8_t test_vbrev8_v_u8m8_tumu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, size_t vl) { + return __riscv_vbrev8_v_u8m8_tumu(vm, vd, vs2, vl); +} + +vuint16mf4_t test_vbrev8_v_u16mf4_tumu(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, size_t vl) { + return __riscv_vbrev8_v_u16mf4_tumu(vm, vd, vs2, vl); +} + +vuint16mf2_t test_vbrev8_v_u16mf2_tumu(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, size_t vl) { + return __riscv_vbrev8_v_u16mf2_tumu(vm, vd, vs2, vl); +} + +vuint16m1_t test_vbrev8_v_u16m1_tumu(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, size_t vl) { + return __riscv_vbrev8_v_u16m1_tumu(vm, vd, vs2, vl); +} + +vuint16m2_t test_vbrev8_v_u16m2_tumu(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, size_t vl) { + return __riscv_vbrev8_v_u16m2_tumu(vm, vd, vs2, vl); +} + +vuint16m4_t test_vbrev8_v_u16m4_tumu(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, size_t vl) { + return __riscv_vbrev8_v_u16m4_tumu(vm, vd, vs2, vl); +} + +vuint16m8_t test_vbrev8_v_u16m8_tumu(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, size_t vl) { + return __riscv_vbrev8_v_u16m8_tumu(vm, vd, vs2, vl); +} + +vuint32mf2_t test_vbrev8_v_u32mf2_tumu(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) { + return __riscv_vbrev8_v_u32mf2_tumu(vm, vd, vs2, vl); +} + +vuint32m1_t test_vbrev8_v_u32m1_tumu(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vbrev8_v_u32m1_tumu(vm, vd, vs2, vl); +} + +vuint32m2_t test_vbrev8_v_u32m2_tumu(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vbrev8_v_u32m2_tumu(vm, vd, vs2, vl); +} + +vuint32m4_t test_vbrev8_v_u32m4_tumu(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vbrev8_v_u32m4_tumu(vm, vd, vs2, vl); +} + +vuint32m8_t test_vbrev8_v_u32m8_tumu(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { + return __riscv_vbrev8_v_u32m8_tumu(vm, vd, vs2, vl); +} + +vuint64m1_t test_vbrev8_v_u64m1_tumu(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, size_t vl) { + return __riscv_vbrev8_v_u64m1_tumu(vm, vd, vs2, vl); +} + +vuint64m2_t test_vbrev8_v_u64m2_tumu(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, size_t vl) { + return __riscv_vbrev8_v_u64m2_tumu(vm, vd, vs2, vl); +} + +vuint64m4_t test_vbrev8_v_u64m4_tumu(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, size_t vl) { + return __riscv_vbrev8_v_u64m4_tumu(vm, vd, vs2, vl); +} + +vuint64m8_t test_vbrev8_v_u64m8_tumu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, size_t vl) { + return __riscv_vbrev8_v_u64m8_tumu(vm, vd, vs2, vl); +} + +vuint8mf8_t test_vbrev8_v_u8mf8_mu(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, size_t vl) { + return __riscv_vbrev8_v_u8mf8_mu(vm, vd, vs2, vl); +} + +vuint8mf4_t test_vbrev8_v_u8mf4_mu(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, size_t vl) { + return __riscv_vbrev8_v_u8mf4_mu(vm, vd, vs2, vl); +} + +vuint8mf2_t test_vbrev8_v_u8mf2_mu(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, size_t vl) { + return __riscv_vbrev8_v_u8mf2_mu(vm, vd, vs2, vl); +} + +vuint8m1_t test_vbrev8_v_u8m1_mu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, size_t vl) { + return __riscv_vbrev8_v_u8m1_mu(vm, vd, vs2, vl); +} + +vuint8m2_t test_vbrev8_v_u8m2_mu(vbool4_t vm, vuint8m2_t 
vd, vuint8m2_t vs2, size_t vl) { + return __riscv_vbrev8_v_u8m2_mu(vm, vd, vs2, vl); +} + +vuint8m4_t test_vbrev8_v_u8m4_mu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, size_t vl) { + return __riscv_vbrev8_v_u8m4_mu(vm, vd, vs2, vl); +} + +vuint8m8_t test_vbrev8_v_u8m8_mu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, size_t vl) { + return __riscv_vbrev8_v_u8m8_mu(vm, vd, vs2, vl); +} + +vuint16mf4_t test_vbrev8_v_u16mf4_mu(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, size_t vl) { + return __riscv_vbrev8_v_u16mf4_mu(vm, vd, vs2, vl); +} + +vuint16mf2_t test_vbrev8_v_u16mf2_mu(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, size_t vl) { + return __riscv_vbrev8_v_u16mf2_mu(vm, vd, vs2, vl); +} + +vuint16m1_t test_vbrev8_v_u16m1_mu(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, size_t vl) { + return __riscv_vbrev8_v_u16m1_mu(vm, vd, vs2, vl); +} + +vuint16m2_t test_vbrev8_v_u16m2_mu(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, size_t vl) { + return __riscv_vbrev8_v_u16m2_mu(vm, vd, vs2, vl); +} + +vuint16m4_t test_vbrev8_v_u16m4_mu(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, size_t vl) { + return __riscv_vbrev8_v_u16m4_mu(vm, vd, vs2, vl); +} + +vuint16m8_t test_vbrev8_v_u16m8_mu(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, size_t vl) { + return __riscv_vbrev8_v_u16m8_mu(vm, vd, vs2, vl); +} + +vuint32mf2_t test_vbrev8_v_u32mf2_mu(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) { + return __riscv_vbrev8_v_u32mf2_mu(vm, vd, vs2, vl); +} + +vuint32m1_t test_vbrev8_v_u32m1_mu(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vbrev8_v_u32m1_mu(vm, vd, vs2, vl); +} + +vuint32m2_t test_vbrev8_v_u32m2_mu(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vbrev8_v_u32m2_mu(vm, vd, vs2, vl); +} + +vuint32m4_t test_vbrev8_v_u32m4_mu(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vbrev8_v_u32m4_mu(vm, vd, vs2, vl); +} + +vuint32m8_t test_vbrev8_v_u32m8_mu(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { + return __riscv_vbrev8_v_u32m8_mu(vm, vd, vs2, vl); +} + +vuint64m1_t test_vbrev8_v_u64m1_mu(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, size_t vl) { + return __riscv_vbrev8_v_u64m1_mu(vm, vd, vs2, vl); +} + +vuint64m2_t test_vbrev8_v_u64m2_mu(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, size_t vl) { + return __riscv_vbrev8_v_u64m2_mu(vm, vd, vs2, vl); +} + +vuint64m4_t test_vbrev8_v_u64m4_mu(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, size_t vl) { + return __riscv_vbrev8_v_u64m4_mu(vm, vd, vs2, vl); +} + +vuint64m8_t test_vbrev8_v_u64m8_mu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, size_t vl) { + return __riscv_vbrev8_v_u64m8_mu(vm, vd, vs2, vl); +} diff --git a/auto-generated/vector-crypto/policy_funcs/llvm-api-tests/vclmul.c b/auto-generated/vector-crypto/policy_funcs/llvm-api-tests/vclmul.c new file mode 100644 index 000000000..c776dacad --- /dev/null +++ b/auto-generated/vector-crypto/policy_funcs/llvm-api-tests/vclmul.c @@ -0,0 +1,141 @@ +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zvl512b \ +// RUN: -target-feature +zvbb \ +// RUN: -target-feature +zvbc \ +// RUN: -target-feature +zvkg \ +// RUN: -target-feature +zvkned \ +// RUN: -target-feature +zvknhb \ +// RUN: -target-feature +zvksed \ +// RUN: -target-feature +zvksh -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include + +vuint64m1_t test_vclmul_vv_u64m1_tu(vuint64m1_t vd, 
vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vclmul_vv_u64m1_tu(vd, vs2, vs1, vl); +} + +vuint64m1_t test_vclmul_vx_u64m1_tu(vuint64m1_t vd, vuint64m1_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vclmul_vx_u64m1_tu(vd, vs2, rs1, vl); +} + +vuint64m2_t test_vclmul_vv_u64m2_tu(vuint64m2_t vd, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { + return __riscv_vclmul_vv_u64m2_tu(vd, vs2, vs1, vl); +} + +vuint64m2_t test_vclmul_vx_u64m2_tu(vuint64m2_t vd, vuint64m2_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vclmul_vx_u64m2_tu(vd, vs2, rs1, vl); +} + +vuint64m4_t test_vclmul_vv_u64m4_tu(vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { + return __riscv_vclmul_vv_u64m4_tu(vd, vs2, vs1, vl); +} + +vuint64m4_t test_vclmul_vx_u64m4_tu(vuint64m4_t vd, vuint64m4_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vclmul_vx_u64m4_tu(vd, vs2, rs1, vl); +} + +vuint64m8_t test_vclmul_vv_u64m8_tu(vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { + return __riscv_vclmul_vv_u64m8_tu(vd, vs2, vs1, vl); +} + +vuint64m8_t test_vclmul_vx_u64m8_tu(vuint64m8_t vd, vuint64m8_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vclmul_vx_u64m8_tu(vd, vs2, rs1, vl); +} + +vuint64m1_t test_vclmul_vv_u64m1_tum(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vclmul_vv_u64m1_tum(vm, vd, vs2, vs1, vl); +} + +vuint64m1_t test_vclmul_vx_u64m1_tum(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vclmul_vx_u64m1_tum(vm, vd, vs2, rs1, vl); +} + +vuint64m2_t test_vclmul_vv_u64m2_tum(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { + return __riscv_vclmul_vv_u64m2_tum(vm, vd, vs2, vs1, vl); +} + +vuint64m2_t test_vclmul_vx_u64m2_tum(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vclmul_vx_u64m2_tum(vm, vd, vs2, rs1, vl); +} + +vuint64m4_t test_vclmul_vv_u64m4_tum(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { + return __riscv_vclmul_vv_u64m4_tum(vm, vd, vs2, vs1, vl); +} + +vuint64m4_t test_vclmul_vx_u64m4_tum(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vclmul_vx_u64m4_tum(vm, vd, vs2, rs1, vl); +} + +vuint64m8_t test_vclmul_vv_u64m8_tum(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { + return __riscv_vclmul_vv_u64m8_tum(vm, vd, vs2, vs1, vl); +} + +vuint64m8_t test_vclmul_vx_u64m8_tum(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vclmul_vx_u64m8_tum(vm, vd, vs2, rs1, vl); +} + +vuint64m1_t test_vclmul_vv_u64m1_tumu(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vclmul_vv_u64m1_tumu(vm, vd, vs2, vs1, vl); +} + +vuint64m1_t test_vclmul_vx_u64m1_tumu(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vclmul_vx_u64m1_tumu(vm, vd, vs2, rs1, vl); +} + +vuint64m2_t test_vclmul_vv_u64m2_tumu(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { + return __riscv_vclmul_vv_u64m2_tumu(vm, vd, vs2, vs1, vl); +} + +vuint64m2_t test_vclmul_vx_u64m2_tumu(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vclmul_vx_u64m2_tumu(vm, vd, vs2, rs1, vl); +} + +vuint64m4_t test_vclmul_vv_u64m4_tumu(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { + return __riscv_vclmul_vv_u64m4_tumu(vm, vd, vs2, vs1, vl); +} + 
+vuint64m4_t test_vclmul_vx_u64m4_tumu(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vclmul_vx_u64m4_tumu(vm, vd, vs2, rs1, vl); +} + +vuint64m8_t test_vclmul_vv_u64m8_tumu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { + return __riscv_vclmul_vv_u64m8_tumu(vm, vd, vs2, vs1, vl); +} + +vuint64m8_t test_vclmul_vx_u64m8_tumu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vclmul_vx_u64m8_tumu(vm, vd, vs2, rs1, vl); +} + +vuint64m1_t test_vclmul_vv_u64m1_mu(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vclmul_vv_u64m1_mu(vm, vd, vs2, vs1, vl); +} + +vuint64m1_t test_vclmul_vx_u64m1_mu(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vclmul_vx_u64m1_mu(vm, vd, vs2, rs1, vl); +} + +vuint64m2_t test_vclmul_vv_u64m2_mu(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { + return __riscv_vclmul_vv_u64m2_mu(vm, vd, vs2, vs1, vl); +} + +vuint64m2_t test_vclmul_vx_u64m2_mu(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vclmul_vx_u64m2_mu(vm, vd, vs2, rs1, vl); +} + +vuint64m4_t test_vclmul_vv_u64m4_mu(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { + return __riscv_vclmul_vv_u64m4_mu(vm, vd, vs2, vs1, vl); +} + +vuint64m4_t test_vclmul_vx_u64m4_mu(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vclmul_vx_u64m4_mu(vm, vd, vs2, rs1, vl); +} + +vuint64m8_t test_vclmul_vv_u64m8_mu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { + return __riscv_vclmul_vv_u64m8_mu(vm, vd, vs2, vs1, vl); +} + +vuint64m8_t test_vclmul_vx_u64m8_mu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vclmul_vx_u64m8_mu(vm, vd, vs2, rs1, vl); +} diff --git a/auto-generated/vector-crypto/policy_funcs/llvm-api-tests/vclmulh.c b/auto-generated/vector-crypto/policy_funcs/llvm-api-tests/vclmulh.c new file mode 100644 index 000000000..94df486ca --- /dev/null +++ b/auto-generated/vector-crypto/policy_funcs/llvm-api-tests/vclmulh.c @@ -0,0 +1,141 @@ +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zvl512b \ +// RUN: -target-feature +zvbb \ +// RUN: -target-feature +zvbc \ +// RUN: -target-feature +zvkg \ +// RUN: -target-feature +zvkned \ +// RUN: -target-feature +zvknhb \ +// RUN: -target-feature +zvksed \ +// RUN: -target-feature +zvksh -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include + +vuint64m1_t test_vclmulh_vv_u64m1_tu(vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vclmulh_vv_u64m1_tu(vd, vs2, vs1, vl); +} + +vuint64m1_t test_vclmulh_vx_u64m1_tu(vuint64m1_t vd, vuint64m1_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vclmulh_vx_u64m1_tu(vd, vs2, rs1, vl); +} + +vuint64m2_t test_vclmulh_vv_u64m2_tu(vuint64m2_t vd, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { + return __riscv_vclmulh_vv_u64m2_tu(vd, vs2, vs1, vl); +} + +vuint64m2_t test_vclmulh_vx_u64m2_tu(vuint64m2_t vd, vuint64m2_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vclmulh_vx_u64m2_tu(vd, vs2, rs1, vl); +} + +vuint64m4_t test_vclmulh_vv_u64m4_tu(vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { + return __riscv_vclmulh_vv_u64m4_tu(vd, vs2, vs1, vl); +} + 
+vuint64m4_t test_vclmulh_vx_u64m4_tu(vuint64m4_t vd, vuint64m4_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vclmulh_vx_u64m4_tu(vd, vs2, rs1, vl); +} + +vuint64m8_t test_vclmulh_vv_u64m8_tu(vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { + return __riscv_vclmulh_vv_u64m8_tu(vd, vs2, vs1, vl); +} + +vuint64m8_t test_vclmulh_vx_u64m8_tu(vuint64m8_t vd, vuint64m8_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vclmulh_vx_u64m8_tu(vd, vs2, rs1, vl); +} + +vuint64m1_t test_vclmulh_vv_u64m1_tum(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vclmulh_vv_u64m1_tum(vm, vd, vs2, vs1, vl); +} + +vuint64m1_t test_vclmulh_vx_u64m1_tum(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vclmulh_vx_u64m1_tum(vm, vd, vs2, rs1, vl); +} + +vuint64m2_t test_vclmulh_vv_u64m2_tum(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { + return __riscv_vclmulh_vv_u64m2_tum(vm, vd, vs2, vs1, vl); +} + +vuint64m2_t test_vclmulh_vx_u64m2_tum(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vclmulh_vx_u64m2_tum(vm, vd, vs2, rs1, vl); +} + +vuint64m4_t test_vclmulh_vv_u64m4_tum(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { + return __riscv_vclmulh_vv_u64m4_tum(vm, vd, vs2, vs1, vl); +} + +vuint64m4_t test_vclmulh_vx_u64m4_tum(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vclmulh_vx_u64m4_tum(vm, vd, vs2, rs1, vl); +} + +vuint64m8_t test_vclmulh_vv_u64m8_tum(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { + return __riscv_vclmulh_vv_u64m8_tum(vm, vd, vs2, vs1, vl); +} + +vuint64m8_t test_vclmulh_vx_u64m8_tum(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vclmulh_vx_u64m8_tum(vm, vd, vs2, rs1, vl); +} + +vuint64m1_t test_vclmulh_vv_u64m1_tumu(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vclmulh_vv_u64m1_tumu(vm, vd, vs2, vs1, vl); +} + +vuint64m1_t test_vclmulh_vx_u64m1_tumu(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vclmulh_vx_u64m1_tumu(vm, vd, vs2, rs1, vl); +} + +vuint64m2_t test_vclmulh_vv_u64m2_tumu(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { + return __riscv_vclmulh_vv_u64m2_tumu(vm, vd, vs2, vs1, vl); +} + +vuint64m2_t test_vclmulh_vx_u64m2_tumu(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vclmulh_vx_u64m2_tumu(vm, vd, vs2, rs1, vl); +} + +vuint64m4_t test_vclmulh_vv_u64m4_tumu(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { + return __riscv_vclmulh_vv_u64m4_tumu(vm, vd, vs2, vs1, vl); +} + +vuint64m4_t test_vclmulh_vx_u64m4_tumu(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vclmulh_vx_u64m4_tumu(vm, vd, vs2, rs1, vl); +} + +vuint64m8_t test_vclmulh_vv_u64m8_tumu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { + return __riscv_vclmulh_vv_u64m8_tumu(vm, vd, vs2, vs1, vl); +} + +vuint64m8_t test_vclmulh_vx_u64m8_tumu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vclmulh_vx_u64m8_tumu(vm, vd, vs2, rs1, vl); +} + +vuint64m1_t test_vclmulh_vv_u64m1_mu(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vclmulh_vv_u64m1_mu(vm, vd, vs2, vs1, vl); 
+} + +vuint64m1_t test_vclmulh_vx_u64m1_mu(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vclmulh_vx_u64m1_mu(vm, vd, vs2, rs1, vl); +} + +vuint64m2_t test_vclmulh_vv_u64m2_mu(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { + return __riscv_vclmulh_vv_u64m2_mu(vm, vd, vs2, vs1, vl); +} + +vuint64m2_t test_vclmulh_vx_u64m2_mu(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vclmulh_vx_u64m2_mu(vm, vd, vs2, rs1, vl); +} + +vuint64m4_t test_vclmulh_vv_u64m4_mu(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { + return __riscv_vclmulh_vv_u64m4_mu(vm, vd, vs2, vs1, vl); +} + +vuint64m4_t test_vclmulh_vx_u64m4_mu(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vclmulh_vx_u64m4_mu(vm, vd, vs2, rs1, vl); +} + +vuint64m8_t test_vclmulh_vv_u64m8_mu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { + return __riscv_vclmulh_vv_u64m8_mu(vm, vd, vs2, vs1, vl); +} + +vuint64m8_t test_vclmulh_vx_u64m8_mu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vclmulh_vx_u64m8_mu(vm, vd, vs2, rs1, vl); +} diff --git a/auto-generated/vector-crypto/policy_funcs/llvm-api-tests/vclz.c b/auto-generated/vector-crypto/policy_funcs/llvm-api-tests/vclz.c new file mode 100644 index 000000000..d9c132cd7 --- /dev/null +++ b/auto-generated/vector-crypto/policy_funcs/llvm-api-tests/vclz.c @@ -0,0 +1,365 @@ +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zvl512b \ +// RUN: -target-feature +zvbb \ +// RUN: -target-feature +zvbc \ +// RUN: -target-feature +zvkg \ +// RUN: -target-feature +zvkned \ +// RUN: -target-feature +zvknhb \ +// RUN: -target-feature +zvksed \ +// RUN: -target-feature +zvksh -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include + +vuint8mf8_t test_vclz_v_u8mf8_tu(vuint8mf8_t vd, vuint8mf8_t vs2, size_t vl) { + return __riscv_vclz_v_u8mf8_tu(vd, vs2, vl); +} + +vuint8mf4_t test_vclz_v_u8mf4_tu(vuint8mf4_t vd, vuint8mf4_t vs2, size_t vl) { + return __riscv_vclz_v_u8mf4_tu(vd, vs2, vl); +} + +vuint8mf2_t test_vclz_v_u8mf2_tu(vuint8mf2_t vd, vuint8mf2_t vs2, size_t vl) { + return __riscv_vclz_v_u8mf2_tu(vd, vs2, vl); +} + +vuint8m1_t test_vclz_v_u8m1_tu(vuint8m1_t vd, vuint8m1_t vs2, size_t vl) { + return __riscv_vclz_v_u8m1_tu(vd, vs2, vl); +} + +vuint8m2_t test_vclz_v_u8m2_tu(vuint8m2_t vd, vuint8m2_t vs2, size_t vl) { + return __riscv_vclz_v_u8m2_tu(vd, vs2, vl); +} + +vuint8m4_t test_vclz_v_u8m4_tu(vuint8m4_t vd, vuint8m4_t vs2, size_t vl) { + return __riscv_vclz_v_u8m4_tu(vd, vs2, vl); +} + +vuint8m8_t test_vclz_v_u8m8_tu(vuint8m8_t vd, vuint8m8_t vs2, size_t vl) { + return __riscv_vclz_v_u8m8_tu(vd, vs2, vl); +} + +vuint16mf4_t test_vclz_v_u16mf4_tu(vuint16mf4_t vd, vuint16mf4_t vs2, size_t vl) { + return __riscv_vclz_v_u16mf4_tu(vd, vs2, vl); +} + +vuint16mf2_t test_vclz_v_u16mf2_tu(vuint16mf2_t vd, vuint16mf2_t vs2, size_t vl) { + return __riscv_vclz_v_u16mf2_tu(vd, vs2, vl); +} + +vuint16m1_t test_vclz_v_u16m1_tu(vuint16m1_t vd, vuint16m1_t vs2, size_t vl) { + return __riscv_vclz_v_u16m1_tu(vd, vs2, vl); +} + +vuint16m2_t test_vclz_v_u16m2_tu(vuint16m2_t vd, vuint16m2_t vs2, size_t vl) { + return __riscv_vclz_v_u16m2_tu(vd, vs2, vl); +} + +vuint16m4_t test_vclz_v_u16m4_tu(vuint16m4_t vd, vuint16m4_t vs2, size_t 
vl) { + return __riscv_vclz_v_u16m4_tu(vd, vs2, vl); +} + +vuint16m8_t test_vclz_v_u16m8_tu(vuint16m8_t vd, vuint16m8_t vs2, size_t vl) { + return __riscv_vclz_v_u16m8_tu(vd, vs2, vl); +} + +vuint32mf2_t test_vclz_v_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) { + return __riscv_vclz_v_u32mf2_tu(vd, vs2, vl); +} + +vuint32m1_t test_vclz_v_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vclz_v_u32m1_tu(vd, vs2, vl); +} + +vuint32m2_t test_vclz_v_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vclz_v_u32m2_tu(vd, vs2, vl); +} + +vuint32m4_t test_vclz_v_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vclz_v_u32m4_tu(vd, vs2, vl); +} + +vuint32m8_t test_vclz_v_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { + return __riscv_vclz_v_u32m8_tu(vd, vs2, vl); +} + +vuint64m1_t test_vclz_v_u64m1_tu(vuint64m1_t vd, vuint64m1_t vs2, size_t vl) { + return __riscv_vclz_v_u64m1_tu(vd, vs2, vl); +} + +vuint64m2_t test_vclz_v_u64m2_tu(vuint64m2_t vd, vuint64m2_t vs2, size_t vl) { + return __riscv_vclz_v_u64m2_tu(vd, vs2, vl); +} + +vuint64m4_t test_vclz_v_u64m4_tu(vuint64m4_t vd, vuint64m4_t vs2, size_t vl) { + return __riscv_vclz_v_u64m4_tu(vd, vs2, vl); +} + +vuint64m8_t test_vclz_v_u64m8_tu(vuint64m8_t vd, vuint64m8_t vs2, size_t vl) { + return __riscv_vclz_v_u64m8_tu(vd, vs2, vl); +} + +vuint8mf8_t test_vclz_v_u8mf8_tum(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, size_t vl) { + return __riscv_vclz_v_u8mf8_tum(vm, vd, vs2, vl); +} + +vuint8mf4_t test_vclz_v_u8mf4_tum(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, size_t vl) { + return __riscv_vclz_v_u8mf4_tum(vm, vd, vs2, vl); +} + +vuint8mf2_t test_vclz_v_u8mf2_tum(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, size_t vl) { + return __riscv_vclz_v_u8mf2_tum(vm, vd, vs2, vl); +} + +vuint8m1_t test_vclz_v_u8m1_tum(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, size_t vl) { + return __riscv_vclz_v_u8m1_tum(vm, vd, vs2, vl); +} + +vuint8m2_t test_vclz_v_u8m2_tum(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, size_t vl) { + return __riscv_vclz_v_u8m2_tum(vm, vd, vs2, vl); +} + +vuint8m4_t test_vclz_v_u8m4_tum(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, size_t vl) { + return __riscv_vclz_v_u8m4_tum(vm, vd, vs2, vl); +} + +vuint8m8_t test_vclz_v_u8m8_tum(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, size_t vl) { + return __riscv_vclz_v_u8m8_tum(vm, vd, vs2, vl); +} + +vuint16mf4_t test_vclz_v_u16mf4_tum(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, size_t vl) { + return __riscv_vclz_v_u16mf4_tum(vm, vd, vs2, vl); +} + +vuint16mf2_t test_vclz_v_u16mf2_tum(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, size_t vl) { + return __riscv_vclz_v_u16mf2_tum(vm, vd, vs2, vl); +} + +vuint16m1_t test_vclz_v_u16m1_tum(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, size_t vl) { + return __riscv_vclz_v_u16m1_tum(vm, vd, vs2, vl); +} + +vuint16m2_t test_vclz_v_u16m2_tum(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, size_t vl) { + return __riscv_vclz_v_u16m2_tum(vm, vd, vs2, vl); +} + +vuint16m4_t test_vclz_v_u16m4_tum(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, size_t vl) { + return __riscv_vclz_v_u16m4_tum(vm, vd, vs2, vl); +} + +vuint16m8_t test_vclz_v_u16m8_tum(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, size_t vl) { + return __riscv_vclz_v_u16m8_tum(vm, vd, vs2, vl); +} + +vuint32mf2_t test_vclz_v_u32mf2_tum(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) { + return __riscv_vclz_v_u32mf2_tum(vm, vd, vs2, vl); +} + +vuint32m1_t test_vclz_v_u32m1_tum(vbool32_t vm, 
vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vclz_v_u32m1_tum(vm, vd, vs2, vl); +} + +vuint32m2_t test_vclz_v_u32m2_tum(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vclz_v_u32m2_tum(vm, vd, vs2, vl); +} + +vuint32m4_t test_vclz_v_u32m4_tum(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vclz_v_u32m4_tum(vm, vd, vs2, vl); +} + +vuint32m8_t test_vclz_v_u32m8_tum(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { + return __riscv_vclz_v_u32m8_tum(vm, vd, vs2, vl); +} + +vuint64m1_t test_vclz_v_u64m1_tum(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, size_t vl) { + return __riscv_vclz_v_u64m1_tum(vm, vd, vs2, vl); +} + +vuint64m2_t test_vclz_v_u64m2_tum(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, size_t vl) { + return __riscv_vclz_v_u64m2_tum(vm, vd, vs2, vl); +} + +vuint64m4_t test_vclz_v_u64m4_tum(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, size_t vl) { + return __riscv_vclz_v_u64m4_tum(vm, vd, vs2, vl); +} + +vuint64m8_t test_vclz_v_u64m8_tum(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, size_t vl) { + return __riscv_vclz_v_u64m8_tum(vm, vd, vs2, vl); +} + +vuint8mf8_t test_vclz_v_u8mf8_tumu(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, size_t vl) { + return __riscv_vclz_v_u8mf8_tumu(vm, vd, vs2, vl); +} + +vuint8mf4_t test_vclz_v_u8mf4_tumu(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, size_t vl) { + return __riscv_vclz_v_u8mf4_tumu(vm, vd, vs2, vl); +} + +vuint8mf2_t test_vclz_v_u8mf2_tumu(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, size_t vl) { + return __riscv_vclz_v_u8mf2_tumu(vm, vd, vs2, vl); +} + +vuint8m1_t test_vclz_v_u8m1_tumu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, size_t vl) { + return __riscv_vclz_v_u8m1_tumu(vm, vd, vs2, vl); +} + +vuint8m2_t test_vclz_v_u8m2_tumu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, size_t vl) { + return __riscv_vclz_v_u8m2_tumu(vm, vd, vs2, vl); +} + +vuint8m4_t test_vclz_v_u8m4_tumu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, size_t vl) { + return __riscv_vclz_v_u8m4_tumu(vm, vd, vs2, vl); +} + +vuint8m8_t test_vclz_v_u8m8_tumu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, size_t vl) { + return __riscv_vclz_v_u8m8_tumu(vm, vd, vs2, vl); +} + +vuint16mf4_t test_vclz_v_u16mf4_tumu(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, size_t vl) { + return __riscv_vclz_v_u16mf4_tumu(vm, vd, vs2, vl); +} + +vuint16mf2_t test_vclz_v_u16mf2_tumu(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, size_t vl) { + return __riscv_vclz_v_u16mf2_tumu(vm, vd, vs2, vl); +} + +vuint16m1_t test_vclz_v_u16m1_tumu(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, size_t vl) { + return __riscv_vclz_v_u16m1_tumu(vm, vd, vs2, vl); +} + +vuint16m2_t test_vclz_v_u16m2_tumu(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, size_t vl) { + return __riscv_vclz_v_u16m2_tumu(vm, vd, vs2, vl); +} + +vuint16m4_t test_vclz_v_u16m4_tumu(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, size_t vl) { + return __riscv_vclz_v_u16m4_tumu(vm, vd, vs2, vl); +} + +vuint16m8_t test_vclz_v_u16m8_tumu(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, size_t vl) { + return __riscv_vclz_v_u16m8_tumu(vm, vd, vs2, vl); +} + +vuint32mf2_t test_vclz_v_u32mf2_tumu(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) { + return __riscv_vclz_v_u32mf2_tumu(vm, vd, vs2, vl); +} + +vuint32m1_t test_vclz_v_u32m1_tumu(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vclz_v_u32m1_tumu(vm, vd, vs2, vl); +} + +vuint32m2_t test_vclz_v_u32m2_tumu(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, size_t 
vl) { + return __riscv_vclz_v_u32m2_tumu(vm, vd, vs2, vl); +} + +vuint32m4_t test_vclz_v_u32m4_tumu(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vclz_v_u32m4_tumu(vm, vd, vs2, vl); +} + +vuint32m8_t test_vclz_v_u32m8_tumu(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { + return __riscv_vclz_v_u32m8_tumu(vm, vd, vs2, vl); +} + +vuint64m1_t test_vclz_v_u64m1_tumu(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, size_t vl) { + return __riscv_vclz_v_u64m1_tumu(vm, vd, vs2, vl); +} + +vuint64m2_t test_vclz_v_u64m2_tumu(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, size_t vl) { + return __riscv_vclz_v_u64m2_tumu(vm, vd, vs2, vl); +} + +vuint64m4_t test_vclz_v_u64m4_tumu(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, size_t vl) { + return __riscv_vclz_v_u64m4_tumu(vm, vd, vs2, vl); +} + +vuint64m8_t test_vclz_v_u64m8_tumu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, size_t vl) { + return __riscv_vclz_v_u64m8_tumu(vm, vd, vs2, vl); +} + +vuint8mf8_t test_vclz_v_u8mf8_mu(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, size_t vl) { + return __riscv_vclz_v_u8mf8_mu(vm, vd, vs2, vl); +} + +vuint8mf4_t test_vclz_v_u8mf4_mu(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, size_t vl) { + return __riscv_vclz_v_u8mf4_mu(vm, vd, vs2, vl); +} + +vuint8mf2_t test_vclz_v_u8mf2_mu(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, size_t vl) { + return __riscv_vclz_v_u8mf2_mu(vm, vd, vs2, vl); +} + +vuint8m1_t test_vclz_v_u8m1_mu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, size_t vl) { + return __riscv_vclz_v_u8m1_mu(vm, vd, vs2, vl); +} + +vuint8m2_t test_vclz_v_u8m2_mu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, size_t vl) { + return __riscv_vclz_v_u8m2_mu(vm, vd, vs2, vl); +} + +vuint8m4_t test_vclz_v_u8m4_mu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, size_t vl) { + return __riscv_vclz_v_u8m4_mu(vm, vd, vs2, vl); +} + +vuint8m8_t test_vclz_v_u8m8_mu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, size_t vl) { + return __riscv_vclz_v_u8m8_mu(vm, vd, vs2, vl); +} + +vuint16mf4_t test_vclz_v_u16mf4_mu(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, size_t vl) { + return __riscv_vclz_v_u16mf4_mu(vm, vd, vs2, vl); +} + +vuint16mf2_t test_vclz_v_u16mf2_mu(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, size_t vl) { + return __riscv_vclz_v_u16mf2_mu(vm, vd, vs2, vl); +} + +vuint16m1_t test_vclz_v_u16m1_mu(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, size_t vl) { + return __riscv_vclz_v_u16m1_mu(vm, vd, vs2, vl); +} + +vuint16m2_t test_vclz_v_u16m2_mu(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, size_t vl) { + return __riscv_vclz_v_u16m2_mu(vm, vd, vs2, vl); +} + +vuint16m4_t test_vclz_v_u16m4_mu(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, size_t vl) { + return __riscv_vclz_v_u16m4_mu(vm, vd, vs2, vl); +} + +vuint16m8_t test_vclz_v_u16m8_mu(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, size_t vl) { + return __riscv_vclz_v_u16m8_mu(vm, vd, vs2, vl); +} + +vuint32mf2_t test_vclz_v_u32mf2_mu(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) { + return __riscv_vclz_v_u32mf2_mu(vm, vd, vs2, vl); +} + +vuint32m1_t test_vclz_v_u32m1_mu(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vclz_v_u32m1_mu(vm, vd, vs2, vl); +} + +vuint32m2_t test_vclz_v_u32m2_mu(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vclz_v_u32m2_mu(vm, vd, vs2, vl); +} + +vuint32m4_t test_vclz_v_u32m4_mu(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vclz_v_u32m4_mu(vm, vd, vs2, vl); +} + +vuint32m8_t 
test_vclz_v_u32m8_mu(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, size_t vl) {
+  return __riscv_vclz_v_u32m8_mu(vm, vd, vs2, vl);
+}
+
+vuint64m1_t test_vclz_v_u64m1_mu(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, size_t vl) {
+  return __riscv_vclz_v_u64m1_mu(vm, vd, vs2, vl);
+}
+
+vuint64m2_t test_vclz_v_u64m2_mu(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, size_t vl) {
+  return __riscv_vclz_v_u64m2_mu(vm, vd, vs2, vl);
+}
+
+vuint64m4_t test_vclz_v_u64m4_mu(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, size_t vl) {
+  return __riscv_vclz_v_u64m4_mu(vm, vd, vs2, vl);
+}
+
+vuint64m8_t test_vclz_v_u64m8_mu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, size_t vl) {
+  return __riscv_vclz_v_u64m8_mu(vm, vd, vs2, vl);
+}
diff --git a/auto-generated/vector-crypto/policy_funcs/llvm-api-tests/vcpop.c b/auto-generated/vector-crypto/policy_funcs/llvm-api-tests/vcpop.c
new file mode 100644
index 000000000..2f89711dc
--- /dev/null
+++ b/auto-generated/vector-crypto/policy_funcs/llvm-api-tests/vcpop.c
@@ -0,0 +1,358 @@
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+vuint8mf8_t test_vcpop_v_u8mf8_tu(vuint8mf8_t vd, vuint8mf8_t vs2, size_t vl) {
+  return __riscv_vcpop_v_u8mf8_tu(vd, vs2, vl);
+}
+
+vuint8mf4_t test_vcpop_v_u8mf4_tu(vuint8mf4_t vd, vuint8mf4_t vs2, size_t vl) {
+  return __riscv_vcpop_v_u8mf4_tu(vd, vs2, vl);
+}
+
+vuint8mf2_t test_vcpop_v_u8mf2_tu(vuint8mf2_t vd, vuint8mf2_t vs2, size_t vl) {
+  return __riscv_vcpop_v_u8mf2_tu(vd, vs2, vl);
+}
+
+vuint8m1_t test_vcpop_v_u8m1_tu(vuint8m1_t vd, vuint8m1_t vs2, size_t vl) {
+  return __riscv_vcpop_v_u8m1_tu(vd, vs2, vl);
+}
+
+vuint8m2_t test_vcpop_v_u8m2_tu(vuint8m2_t vd, vuint8m2_t vs2, size_t vl) {
+  return __riscv_vcpop_v_u8m2_tu(vd, vs2, vl);
+}
+
+vuint8m4_t test_vcpop_v_u8m4_tu(vuint8m4_t vd, vuint8m4_t vs2, size_t vl) {
+  return __riscv_vcpop_v_u8m4_tu(vd, vs2, vl);
+}
+
+vuint8m8_t test_vcpop_v_u8m8_tu(vuint8m8_t vd, vuint8m8_t vs2, size_t vl) {
+  return __riscv_vcpop_v_u8m8_tu(vd, vs2, vl);
+}
+
+vuint16mf4_t test_vcpop_v_u16mf4_tu(vuint16mf4_t vd, vuint16mf4_t vs2, size_t vl) {
+  return __riscv_vcpop_v_u16mf4_tu(vd, vs2, vl);
+}
+
+vuint16mf2_t test_vcpop_v_u16mf2_tu(vuint16mf2_t vd, vuint16mf2_t vs2, size_t vl) {
+  return __riscv_vcpop_v_u16mf2_tu(vd, vs2, vl);
+}
+
+vuint16m1_t test_vcpop_v_u16m1_tu(vuint16m1_t vd, vuint16m1_t vs2, size_t vl) {
+  return __riscv_vcpop_v_u16m1_tu(vd, vs2, vl);
+}
+
+vuint16m2_t test_vcpop_v_u16m2_tu(vuint16m2_t vd, vuint16m2_t vs2, size_t vl) {
+  return __riscv_vcpop_v_u16m2_tu(vd, vs2, vl);
+}
+
+vuint16m4_t test_vcpop_v_u16m4_tu(vuint16m4_t vd, vuint16m4_t vs2, size_t vl) {
+  return __riscv_vcpop_v_u16m4_tu(vd, vs2, vl);
+}
+
+vuint16m8_t test_vcpop_v_u16m8_tu(vuint16m8_t vd, vuint16m8_t vs2, size_t vl) {
+  return __riscv_vcpop_v_u16m8_tu(vd, vs2, vl);
+}
+
+vuint32mf2_t test_vcpop_v_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) {
+  return __riscv_vcpop_v_u32mf2_tu(vd, vs2, vl);
+}
+
+vuint32m1_t test_vcpop_v_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) {
+  return __riscv_vcpop_v_u32m1_tu(vd, vs2, vl);
+}
+
+vuint32m2_t test_vcpop_v_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) {
+  return __riscv_vcpop_v_u32m2_tu(vd, vs2, vl);
+}
+
+vuint32m4_t test_vcpop_v_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) {
+  return __riscv_vcpop_v_u32m4_tu(vd, vs2, vl);
+}
+
+vuint32m8_t 
test_vcpop_v_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { + return __riscv_vcpop_v_u32m8_tu(vd, vs2, vl); +} + +vuint64m1_t test_vcpop_v_u64m1_tu(vuint64m1_t vd, vuint64m1_t vs2, size_t vl) { + return __riscv_vcpop_v_u64m1_tu(vd, vs2, vl); +} + +vuint64m2_t test_vcpop_v_u64m2_tu(vuint64m2_t vd, vuint64m2_t vs2, size_t vl) { + return __riscv_vcpop_v_u64m2_tu(vd, vs2, vl); +} + +vuint64m4_t test_vcpop_v_u64m4_tu(vuint64m4_t vd, vuint64m4_t vs2, size_t vl) { + return __riscv_vcpop_v_u64m4_tu(vd, vs2, vl); +} + +vuint64m8_t test_vcpop_v_u64m8_tu(vuint64m8_t vd, vuint64m8_t vs2, size_t vl) { + return __riscv_vcpop_v_u64m8_tu(vd, vs2, vl); +} + +vuint8mf8_t test_vcpop_v_u8mf8_tum(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, size_t vl) { + return __riscv_vcpop_v_u8mf8_tum(vm, vd, vs2, vl); +} + +vuint8mf4_t test_vcpop_v_u8mf4_tum(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, size_t vl) { + return __riscv_vcpop_v_u8mf4_tum(vm, vd, vs2, vl); +} + +vuint8mf2_t test_vcpop_v_u8mf2_tum(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, size_t vl) { + return __riscv_vcpop_v_u8mf2_tum(vm, vd, vs2, vl); +} + +vuint8m1_t test_vcpop_v_u8m1_tum(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, size_t vl) { + return __riscv_vcpop_v_u8m1_tum(vm, vd, vs2, vl); +} + +vuint8m2_t test_vcpop_v_u8m2_tum(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, size_t vl) { + return __riscv_vcpop_v_u8m2_tum(vm, vd, vs2, vl); +} + +vuint8m4_t test_vcpop_v_u8m4_tum(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, size_t vl) { + return __riscv_vcpop_v_u8m4_tum(vm, vd, vs2, vl); +} + +vuint8m8_t test_vcpop_v_u8m8_tum(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, size_t vl) { + return __riscv_vcpop_v_u8m8_tum(vm, vd, vs2, vl); +} + +vuint16mf4_t test_vcpop_v_u16mf4_tum(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, size_t vl) { + return __riscv_vcpop_v_u16mf4_tum(vm, vd, vs2, vl); +} + +vuint16mf2_t test_vcpop_v_u16mf2_tum(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, size_t vl) { + return __riscv_vcpop_v_u16mf2_tum(vm, vd, vs2, vl); +} + +vuint16m1_t test_vcpop_v_u16m1_tum(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, size_t vl) { + return __riscv_vcpop_v_u16m1_tum(vm, vd, vs2, vl); +} + +vuint16m2_t test_vcpop_v_u16m2_tum(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, size_t vl) { + return __riscv_vcpop_v_u16m2_tum(vm, vd, vs2, vl); +} + +vuint16m4_t test_vcpop_v_u16m4_tum(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, size_t vl) { + return __riscv_vcpop_v_u16m4_tum(vm, vd, vs2, vl); +} + +vuint16m8_t test_vcpop_v_u16m8_tum(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, size_t vl) { + return __riscv_vcpop_v_u16m8_tum(vm, vd, vs2, vl); +} + +vuint32mf2_t test_vcpop_v_u32mf2_tum(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) { + return __riscv_vcpop_v_u32mf2_tum(vm, vd, vs2, vl); +} + +vuint32m1_t test_vcpop_v_u32m1_tum(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vcpop_v_u32m1_tum(vm, vd, vs2, vl); +} + +vuint32m2_t test_vcpop_v_u32m2_tum(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vcpop_v_u32m2_tum(vm, vd, vs2, vl); +} + +vuint32m4_t test_vcpop_v_u32m4_tum(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vcpop_v_u32m4_tum(vm, vd, vs2, vl); +} + +vuint32m8_t test_vcpop_v_u32m8_tum(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { + return __riscv_vcpop_v_u32m8_tum(vm, vd, vs2, vl); +} + +vuint64m1_t test_vcpop_v_u64m1_tum(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, size_t vl) { + return __riscv_vcpop_v_u64m1_tum(vm, vd, 
vs2, vl); +} + +vuint64m2_t test_vcpop_v_u64m2_tum(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, size_t vl) { + return __riscv_vcpop_v_u64m2_tum(vm, vd, vs2, vl); +} + +vuint64m4_t test_vcpop_v_u64m4_tum(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, size_t vl) { + return __riscv_vcpop_v_u64m4_tum(vm, vd, vs2, vl); +} + +vuint64m8_t test_vcpop_v_u64m8_tum(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, size_t vl) { + return __riscv_vcpop_v_u64m8_tum(vm, vd, vs2, vl); +} + +vuint8mf8_t test_vcpop_v_u8mf8_tumu(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, size_t vl) { + return __riscv_vcpop_v_u8mf8_tumu(vm, vd, vs2, vl); +} + +vuint8mf4_t test_vcpop_v_u8mf4_tumu(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, size_t vl) { + return __riscv_vcpop_v_u8mf4_tumu(vm, vd, vs2, vl); +} + +vuint8mf2_t test_vcpop_v_u8mf2_tumu(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, size_t vl) { + return __riscv_vcpop_v_u8mf2_tumu(vm, vd, vs2, vl); +} + +vuint8m1_t test_vcpop_v_u8m1_tumu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, size_t vl) { + return __riscv_vcpop_v_u8m1_tumu(vm, vd, vs2, vl); +} + +vuint8m2_t test_vcpop_v_u8m2_tumu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, size_t vl) { + return __riscv_vcpop_v_u8m2_tumu(vm, vd, vs2, vl); +} + +vuint8m4_t test_vcpop_v_u8m4_tumu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, size_t vl) { + return __riscv_vcpop_v_u8m4_tumu(vm, vd, vs2, vl); +} + +vuint8m8_t test_vcpop_v_u8m8_tumu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, size_t vl) { + return __riscv_vcpop_v_u8m8_tumu(vm, vd, vs2, vl); +} + +vuint16mf4_t test_vcpop_v_u16mf4_tumu(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, size_t vl) { + return __riscv_vcpop_v_u16mf4_tumu(vm, vd, vs2, vl); +} + +vuint16mf2_t test_vcpop_v_u16mf2_tumu(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, size_t vl) { + return __riscv_vcpop_v_u16mf2_tumu(vm, vd, vs2, vl); +} + +vuint16m1_t test_vcpop_v_u16m1_tumu(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, size_t vl) { + return __riscv_vcpop_v_u16m1_tumu(vm, vd, vs2, vl); +} + +vuint16m2_t test_vcpop_v_u16m2_tumu(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, size_t vl) { + return __riscv_vcpop_v_u16m2_tumu(vm, vd, vs2, vl); +} + +vuint16m4_t test_vcpop_v_u16m4_tumu(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, size_t vl) { + return __riscv_vcpop_v_u16m4_tumu(vm, vd, vs2, vl); +} + +vuint16m8_t test_vcpop_v_u16m8_tumu(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, size_t vl) { + return __riscv_vcpop_v_u16m8_tumu(vm, vd, vs2, vl); +} + +vuint32mf2_t test_vcpop_v_u32mf2_tumu(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) { + return __riscv_vcpop_v_u32mf2_tumu(vm, vd, vs2, vl); +} + +vuint32m1_t test_vcpop_v_u32m1_tumu(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vcpop_v_u32m1_tumu(vm, vd, vs2, vl); +} + +vuint32m2_t test_vcpop_v_u32m2_tumu(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vcpop_v_u32m2_tumu(vm, vd, vs2, vl); +} + +vuint32m4_t test_vcpop_v_u32m4_tumu(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vcpop_v_u32m4_tumu(vm, vd, vs2, vl); +} + +vuint32m8_t test_vcpop_v_u32m8_tumu(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { + return __riscv_vcpop_v_u32m8_tumu(vm, vd, vs2, vl); +} + +vuint64m1_t test_vcpop_v_u64m1_tumu(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, size_t vl) { + return __riscv_vcpop_v_u64m1_tumu(vm, vd, vs2, vl); +} + +vuint64m2_t test_vcpop_v_u64m2_tumu(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, size_t vl) { + return 
__riscv_vcpop_v_u64m2_tumu(vm, vd, vs2, vl); +} + +vuint64m4_t test_vcpop_v_u64m4_tumu(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, size_t vl) { + return __riscv_vcpop_v_u64m4_tumu(vm, vd, vs2, vl); +} + +vuint64m8_t test_vcpop_v_u64m8_tumu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, size_t vl) { + return __riscv_vcpop_v_u64m8_tumu(vm, vd, vs2, vl); +} + +vuint8mf8_t test_vcpop_v_u8mf8_mu(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, size_t vl) { + return __riscv_vcpop_v_u8mf8_mu(vm, vd, vs2, vl); +} + +vuint8mf4_t test_vcpop_v_u8mf4_mu(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, size_t vl) { + return __riscv_vcpop_v_u8mf4_mu(vm, vd, vs2, vl); +} + +vuint8mf2_t test_vcpop_v_u8mf2_mu(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, size_t vl) { + return __riscv_vcpop_v_u8mf2_mu(vm, vd, vs2, vl); +} + +vuint8m1_t test_vcpop_v_u8m1_mu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, size_t vl) { + return __riscv_vcpop_v_u8m1_mu(vm, vd, vs2, vl); +} + +vuint8m2_t test_vcpop_v_u8m2_mu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, size_t vl) { + return __riscv_vcpop_v_u8m2_mu(vm, vd, vs2, vl); +} + +vuint8m4_t test_vcpop_v_u8m4_mu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, size_t vl) { + return __riscv_vcpop_v_u8m4_mu(vm, vd, vs2, vl); +} + +vuint8m8_t test_vcpop_v_u8m8_mu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, size_t vl) { + return __riscv_vcpop_v_u8m8_mu(vm, vd, vs2, vl); +} + +vuint16mf4_t test_vcpop_v_u16mf4_mu(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, size_t vl) { + return __riscv_vcpop_v_u16mf4_mu(vm, vd, vs2, vl); +} + +vuint16mf2_t test_vcpop_v_u16mf2_mu(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, size_t vl) { + return __riscv_vcpop_v_u16mf2_mu(vm, vd, vs2, vl); +} + +vuint16m1_t test_vcpop_v_u16m1_mu(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, size_t vl) { + return __riscv_vcpop_v_u16m1_mu(vm, vd, vs2, vl); +} + +vuint16m2_t test_vcpop_v_u16m2_mu(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, size_t vl) { + return __riscv_vcpop_v_u16m2_mu(vm, vd, vs2, vl); +} + +vuint16m4_t test_vcpop_v_u16m4_mu(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, size_t vl) { + return __riscv_vcpop_v_u16m4_mu(vm, vd, vs2, vl); +} + +vuint16m8_t test_vcpop_v_u16m8_mu(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, size_t vl) { + return __riscv_vcpop_v_u16m8_mu(vm, vd, vs2, vl); +} + +vuint32mf2_t test_vcpop_v_u32mf2_mu(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) { + return __riscv_vcpop_v_u32mf2_mu(vm, vd, vs2, vl); +} + +vuint32m1_t test_vcpop_v_u32m1_mu(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vcpop_v_u32m1_mu(vm, vd, vs2, vl); +} + +vuint32m2_t test_vcpop_v_u32m2_mu(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vcpop_v_u32m2_mu(vm, vd, vs2, vl); +} + +vuint32m4_t test_vcpop_v_u32m4_mu(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vcpop_v_u32m4_mu(vm, vd, vs2, vl); +} + +vuint32m8_t test_vcpop_v_u32m8_mu(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { + return __riscv_vcpop_v_u32m8_mu(vm, vd, vs2, vl); +} + +vuint64m1_t test_vcpop_v_u64m1_mu(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, size_t vl) { + return __riscv_vcpop_v_u64m1_mu(vm, vd, vs2, vl); +} + +vuint64m2_t test_vcpop_v_u64m2_mu(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, size_t vl) { + return __riscv_vcpop_v_u64m2_mu(vm, vd, vs2, vl); +} + +vuint64m4_t test_vcpop_v_u64m4_mu(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, size_t vl) { + return __riscv_vcpop_v_u64m4_mu(vm, vd, vs2, vl); +} + +vuint64m8_t 
test_vcpop_v_u64m8_mu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, size_t vl) { + return __riscv_vcpop_v_u64m8_mu(vm, vd, vs2, vl); +} diff --git a/auto-generated/vector-crypto/policy_funcs/llvm-api-tests/vctz.c b/auto-generated/vector-crypto/policy_funcs/llvm-api-tests/vctz.c new file mode 100644 index 000000000..54d7ee887 --- /dev/null +++ b/auto-generated/vector-crypto/policy_funcs/llvm-api-tests/vctz.c @@ -0,0 +1,365 @@ +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zvl512b \ +// RUN: -target-feature +zvbb \ +// RUN: -target-feature +zvbc \ +// RUN: -target-feature +zvkg \ +// RUN: -target-feature +zvkned \ +// RUN: -target-feature +zvknhb \ +// RUN: -target-feature +zvksed \ +// RUN: -target-feature +zvksh -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include + +vuint8mf8_t test_vctz_v_u8mf8_tu(vuint8mf8_t vd, vuint8mf8_t vs2, size_t vl) { + return __riscv_vctz_v_u8mf8_tu(vd, vs2, vl); +} + +vuint8mf4_t test_vctz_v_u8mf4_tu(vuint8mf4_t vd, vuint8mf4_t vs2, size_t vl) { + return __riscv_vctz_v_u8mf4_tu(vd, vs2, vl); +} + +vuint8mf2_t test_vctz_v_u8mf2_tu(vuint8mf2_t vd, vuint8mf2_t vs2, size_t vl) { + return __riscv_vctz_v_u8mf2_tu(vd, vs2, vl); +} + +vuint8m1_t test_vctz_v_u8m1_tu(vuint8m1_t vd, vuint8m1_t vs2, size_t vl) { + return __riscv_vctz_v_u8m1_tu(vd, vs2, vl); +} + +vuint8m2_t test_vctz_v_u8m2_tu(vuint8m2_t vd, vuint8m2_t vs2, size_t vl) { + return __riscv_vctz_v_u8m2_tu(vd, vs2, vl); +} + +vuint8m4_t test_vctz_v_u8m4_tu(vuint8m4_t vd, vuint8m4_t vs2, size_t vl) { + return __riscv_vctz_v_u8m4_tu(vd, vs2, vl); +} + +vuint8m8_t test_vctz_v_u8m8_tu(vuint8m8_t vd, vuint8m8_t vs2, size_t vl) { + return __riscv_vctz_v_u8m8_tu(vd, vs2, vl); +} + +vuint16mf4_t test_vctz_v_u16mf4_tu(vuint16mf4_t vd, vuint16mf4_t vs2, size_t vl) { + return __riscv_vctz_v_u16mf4_tu(vd, vs2, vl); +} + +vuint16mf2_t test_vctz_v_u16mf2_tu(vuint16mf2_t vd, vuint16mf2_t vs2, size_t vl) { + return __riscv_vctz_v_u16mf2_tu(vd, vs2, vl); +} + +vuint16m1_t test_vctz_v_u16m1_tu(vuint16m1_t vd, vuint16m1_t vs2, size_t vl) { + return __riscv_vctz_v_u16m1_tu(vd, vs2, vl); +} + +vuint16m2_t test_vctz_v_u16m2_tu(vuint16m2_t vd, vuint16m2_t vs2, size_t vl) { + return __riscv_vctz_v_u16m2_tu(vd, vs2, vl); +} + +vuint16m4_t test_vctz_v_u16m4_tu(vuint16m4_t vd, vuint16m4_t vs2, size_t vl) { + return __riscv_vctz_v_u16m4_tu(vd, vs2, vl); +} + +vuint16m8_t test_vctz_v_u16m8_tu(vuint16m8_t vd, vuint16m8_t vs2, size_t vl) { + return __riscv_vctz_v_u16m8_tu(vd, vs2, vl); +} + +vuint32mf2_t test_vctz_v_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) { + return __riscv_vctz_v_u32mf2_tu(vd, vs2, vl); +} + +vuint32m1_t test_vctz_v_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vctz_v_u32m1_tu(vd, vs2, vl); +} + +vuint32m2_t test_vctz_v_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vctz_v_u32m2_tu(vd, vs2, vl); +} + +vuint32m4_t test_vctz_v_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vctz_v_u32m4_tu(vd, vs2, vl); +} + +vuint32m8_t test_vctz_v_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { + return __riscv_vctz_v_u32m8_tu(vd, vs2, vl); +} + +vuint64m1_t test_vctz_v_u64m1_tu(vuint64m1_t vd, vuint64m1_t vs2, size_t vl) { + return __riscv_vctz_v_u64m1_tu(vd, vs2, vl); +} + +vuint64m2_t test_vctz_v_u64m2_tu(vuint64m2_t vd, vuint64m2_t vs2, size_t vl) { + return __riscv_vctz_v_u64m2_tu(vd, vs2, 
vl); +} + +vuint64m4_t test_vctz_v_u64m4_tu(vuint64m4_t vd, vuint64m4_t vs2, size_t vl) { + return __riscv_vctz_v_u64m4_tu(vd, vs2, vl); +} + +vuint64m8_t test_vctz_v_u64m8_tu(vuint64m8_t vd, vuint64m8_t vs2, size_t vl) { + return __riscv_vctz_v_u64m8_tu(vd, vs2, vl); +} + +vuint8mf8_t test_vctz_v_u8mf8_tum(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, size_t vl) { + return __riscv_vctz_v_u8mf8_tum(vm, vd, vs2, vl); +} + +vuint8mf4_t test_vctz_v_u8mf4_tum(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, size_t vl) { + return __riscv_vctz_v_u8mf4_tum(vm, vd, vs2, vl); +} + +vuint8mf2_t test_vctz_v_u8mf2_tum(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, size_t vl) { + return __riscv_vctz_v_u8mf2_tum(vm, vd, vs2, vl); +} + +vuint8m1_t test_vctz_v_u8m1_tum(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, size_t vl) { + return __riscv_vctz_v_u8m1_tum(vm, vd, vs2, vl); +} + +vuint8m2_t test_vctz_v_u8m2_tum(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, size_t vl) { + return __riscv_vctz_v_u8m2_tum(vm, vd, vs2, vl); +} + +vuint8m4_t test_vctz_v_u8m4_tum(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, size_t vl) { + return __riscv_vctz_v_u8m4_tum(vm, vd, vs2, vl); +} + +vuint8m8_t test_vctz_v_u8m8_tum(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, size_t vl) { + return __riscv_vctz_v_u8m8_tum(vm, vd, vs2, vl); +} + +vuint16mf4_t test_vctz_v_u16mf4_tum(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, size_t vl) { + return __riscv_vctz_v_u16mf4_tum(vm, vd, vs2, vl); +} + +vuint16mf2_t test_vctz_v_u16mf2_tum(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, size_t vl) { + return __riscv_vctz_v_u16mf2_tum(vm, vd, vs2, vl); +} + +vuint16m1_t test_vctz_v_u16m1_tum(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, size_t vl) { + return __riscv_vctz_v_u16m1_tum(vm, vd, vs2, vl); +} + +vuint16m2_t test_vctz_v_u16m2_tum(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, size_t vl) { + return __riscv_vctz_v_u16m2_tum(vm, vd, vs2, vl); +} + +vuint16m4_t test_vctz_v_u16m4_tum(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, size_t vl) { + return __riscv_vctz_v_u16m4_tum(vm, vd, vs2, vl); +} + +vuint16m8_t test_vctz_v_u16m8_tum(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, size_t vl) { + return __riscv_vctz_v_u16m8_tum(vm, vd, vs2, vl); +} + +vuint32mf2_t test_vctz_v_u32mf2_tum(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) { + return __riscv_vctz_v_u32mf2_tum(vm, vd, vs2, vl); +} + +vuint32m1_t test_vctz_v_u32m1_tum(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vctz_v_u32m1_tum(vm, vd, vs2, vl); +} + +vuint32m2_t test_vctz_v_u32m2_tum(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vctz_v_u32m2_tum(vm, vd, vs2, vl); +} + +vuint32m4_t test_vctz_v_u32m4_tum(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vctz_v_u32m4_tum(vm, vd, vs2, vl); +} + +vuint32m8_t test_vctz_v_u32m8_tum(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { + return __riscv_vctz_v_u32m8_tum(vm, vd, vs2, vl); +} + +vuint64m1_t test_vctz_v_u64m1_tum(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, size_t vl) { + return __riscv_vctz_v_u64m1_tum(vm, vd, vs2, vl); +} + +vuint64m2_t test_vctz_v_u64m2_tum(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, size_t vl) { + return __riscv_vctz_v_u64m2_tum(vm, vd, vs2, vl); +} + +vuint64m4_t test_vctz_v_u64m4_tum(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, size_t vl) { + return __riscv_vctz_v_u64m4_tum(vm, vd, vs2, vl); +} + +vuint64m8_t test_vctz_v_u64m8_tum(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, size_t vl) { + return 
__riscv_vctz_v_u64m8_tum(vm, vd, vs2, vl); +} + +vuint8mf8_t test_vctz_v_u8mf8_tumu(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, size_t vl) { + return __riscv_vctz_v_u8mf8_tumu(vm, vd, vs2, vl); +} + +vuint8mf4_t test_vctz_v_u8mf4_tumu(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, size_t vl) { + return __riscv_vctz_v_u8mf4_tumu(vm, vd, vs2, vl); +} + +vuint8mf2_t test_vctz_v_u8mf2_tumu(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, size_t vl) { + return __riscv_vctz_v_u8mf2_tumu(vm, vd, vs2, vl); +} + +vuint8m1_t test_vctz_v_u8m1_tumu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, size_t vl) { + return __riscv_vctz_v_u8m1_tumu(vm, vd, vs2, vl); +} + +vuint8m2_t test_vctz_v_u8m2_tumu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, size_t vl) { + return __riscv_vctz_v_u8m2_tumu(vm, vd, vs2, vl); +} + +vuint8m4_t test_vctz_v_u8m4_tumu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, size_t vl) { + return __riscv_vctz_v_u8m4_tumu(vm, vd, vs2, vl); +} + +vuint8m8_t test_vctz_v_u8m8_tumu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, size_t vl) { + return __riscv_vctz_v_u8m8_tumu(vm, vd, vs2, vl); +} + +vuint16mf4_t test_vctz_v_u16mf4_tumu(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, size_t vl) { + return __riscv_vctz_v_u16mf4_tumu(vm, vd, vs2, vl); +} + +vuint16mf2_t test_vctz_v_u16mf2_tumu(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, size_t vl) { + return __riscv_vctz_v_u16mf2_tumu(vm, vd, vs2, vl); +} + +vuint16m1_t test_vctz_v_u16m1_tumu(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, size_t vl) { + return __riscv_vctz_v_u16m1_tumu(vm, vd, vs2, vl); +} + +vuint16m2_t test_vctz_v_u16m2_tumu(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, size_t vl) { + return __riscv_vctz_v_u16m2_tumu(vm, vd, vs2, vl); +} + +vuint16m4_t test_vctz_v_u16m4_tumu(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, size_t vl) { + return __riscv_vctz_v_u16m4_tumu(vm, vd, vs2, vl); +} + +vuint16m8_t test_vctz_v_u16m8_tumu(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, size_t vl) { + return __riscv_vctz_v_u16m8_tumu(vm, vd, vs2, vl); +} + +vuint32mf2_t test_vctz_v_u32mf2_tumu(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) { + return __riscv_vctz_v_u32mf2_tumu(vm, vd, vs2, vl); +} + +vuint32m1_t test_vctz_v_u32m1_tumu(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vctz_v_u32m1_tumu(vm, vd, vs2, vl); +} + +vuint32m2_t test_vctz_v_u32m2_tumu(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vctz_v_u32m2_tumu(vm, vd, vs2, vl); +} + +vuint32m4_t test_vctz_v_u32m4_tumu(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vctz_v_u32m4_tumu(vm, vd, vs2, vl); +} + +vuint32m8_t test_vctz_v_u32m8_tumu(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { + return __riscv_vctz_v_u32m8_tumu(vm, vd, vs2, vl); +} + +vuint64m1_t test_vctz_v_u64m1_tumu(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, size_t vl) { + return __riscv_vctz_v_u64m1_tumu(vm, vd, vs2, vl); +} + +vuint64m2_t test_vctz_v_u64m2_tumu(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, size_t vl) { + return __riscv_vctz_v_u64m2_tumu(vm, vd, vs2, vl); +} + +vuint64m4_t test_vctz_v_u64m4_tumu(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, size_t vl) { + return __riscv_vctz_v_u64m4_tumu(vm, vd, vs2, vl); +} + +vuint64m8_t test_vctz_v_u64m8_tumu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, size_t vl) { + return __riscv_vctz_v_u64m8_tumu(vm, vd, vs2, vl); +} + +vuint8mf8_t test_vctz_v_u8mf8_mu(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, size_t vl) { + return __riscv_vctz_v_u8mf8_mu(vm, 
vd, vs2, vl); +} + +vuint8mf4_t test_vctz_v_u8mf4_mu(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, size_t vl) { + return __riscv_vctz_v_u8mf4_mu(vm, vd, vs2, vl); +} + +vuint8mf2_t test_vctz_v_u8mf2_mu(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, size_t vl) { + return __riscv_vctz_v_u8mf2_mu(vm, vd, vs2, vl); +} + +vuint8m1_t test_vctz_v_u8m1_mu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, size_t vl) { + return __riscv_vctz_v_u8m1_mu(vm, vd, vs2, vl); +} + +vuint8m2_t test_vctz_v_u8m2_mu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, size_t vl) { + return __riscv_vctz_v_u8m2_mu(vm, vd, vs2, vl); +} + +vuint8m4_t test_vctz_v_u8m4_mu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, size_t vl) { + return __riscv_vctz_v_u8m4_mu(vm, vd, vs2, vl); +} + +vuint8m8_t test_vctz_v_u8m8_mu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, size_t vl) { + return __riscv_vctz_v_u8m8_mu(vm, vd, vs2, vl); +} + +vuint16mf4_t test_vctz_v_u16mf4_mu(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, size_t vl) { + return __riscv_vctz_v_u16mf4_mu(vm, vd, vs2, vl); +} + +vuint16mf2_t test_vctz_v_u16mf2_mu(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, size_t vl) { + return __riscv_vctz_v_u16mf2_mu(vm, vd, vs2, vl); +} + +vuint16m1_t test_vctz_v_u16m1_mu(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, size_t vl) { + return __riscv_vctz_v_u16m1_mu(vm, vd, vs2, vl); +} + +vuint16m2_t test_vctz_v_u16m2_mu(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, size_t vl) { + return __riscv_vctz_v_u16m2_mu(vm, vd, vs2, vl); +} + +vuint16m4_t test_vctz_v_u16m4_mu(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, size_t vl) { + return __riscv_vctz_v_u16m4_mu(vm, vd, vs2, vl); +} + +vuint16m8_t test_vctz_v_u16m8_mu(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, size_t vl) { + return __riscv_vctz_v_u16m8_mu(vm, vd, vs2, vl); +} + +vuint32mf2_t test_vctz_v_u32mf2_mu(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) { + return __riscv_vctz_v_u32mf2_mu(vm, vd, vs2, vl); +} + +vuint32m1_t test_vctz_v_u32m1_mu(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vctz_v_u32m1_mu(vm, vd, vs2, vl); +} + +vuint32m2_t test_vctz_v_u32m2_mu(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vctz_v_u32m2_mu(vm, vd, vs2, vl); +} + +vuint32m4_t test_vctz_v_u32m4_mu(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vctz_v_u32m4_mu(vm, vd, vs2, vl); +} + +vuint32m8_t test_vctz_v_u32m8_mu(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { + return __riscv_vctz_v_u32m8_mu(vm, vd, vs2, vl); +} + +vuint64m1_t test_vctz_v_u64m1_mu(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, size_t vl) { + return __riscv_vctz_v_u64m1_mu(vm, vd, vs2, vl); +} + +vuint64m2_t test_vctz_v_u64m2_mu(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, size_t vl) { + return __riscv_vctz_v_u64m2_mu(vm, vd, vs2, vl); +} + +vuint64m4_t test_vctz_v_u64m4_mu(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, size_t vl) { + return __riscv_vctz_v_u64m4_mu(vm, vd, vs2, vl); +} + +vuint64m8_t test_vctz_v_u64m8_mu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, size_t vl) { + return __riscv_vctz_v_u64m8_mu(vm, vd, vs2, vl); +} diff --git a/auto-generated/vector-crypto/policy_funcs/llvm-api-tests/vghsh.c b/auto-generated/vector-crypto/policy_funcs/llvm-api-tests/vghsh.c new file mode 100644 index 000000000..e3f7395a9 --- /dev/null +++ b/auto-generated/vector-crypto/policy_funcs/llvm-api-tests/vghsh.c @@ -0,0 +1,33 @@ +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v 
-target-feature +zvl512b \ +// RUN: -target-feature +zvbb \ +// RUN: -target-feature +zvbc \ +// RUN: -target-feature +zvkg \ +// RUN: -target-feature +zvkned \ +// RUN: -target-feature +zvknhb \ +// RUN: -target-feature +zvksed \ +// RUN: -target-feature +zvksh -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include <riscv_vector.h> + +vuint32mf2_t test_vghsh_vv_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { + return __riscv_vghsh_vv_u32mf2_tu(vd, vs2, vs1, vl); +} + +vuint32m1_t test_vghsh_vv_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vghsh_vv_u32m1_tu(vd, vs2, vs1, vl); +} + +vuint32m2_t test_vghsh_vv_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { + return __riscv_vghsh_vv_u32m2_tu(vd, vs2, vs1, vl); +} + +vuint32m4_t test_vghsh_vv_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { + return __riscv_vghsh_vv_u32m4_tu(vd, vs2, vs1, vl); +} + +vuint32m8_t test_vghsh_vv_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) { + return __riscv_vghsh_vv_u32m8_tu(vd, vs2, vs1, vl); +} diff --git a/auto-generated/vector-crypto/policy_funcs/llvm-api-tests/vgmul.c b/auto-generated/vector-crypto/policy_funcs/llvm-api-tests/vgmul.c new file mode 100644 index 000000000..e4920e5d1 --- /dev/null +++ b/auto-generated/vector-crypto/policy_funcs/llvm-api-tests/vgmul.c @@ -0,0 +1,33 @@ +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zvl512b \ +// RUN: -target-feature +zvbb \ +// RUN: -target-feature +zvbc \ +// RUN: -target-feature +zvkg \ +// RUN: -target-feature +zvkned \ +// RUN: -target-feature +zvknhb \ +// RUN: -target-feature +zvksed \ +// RUN: -target-feature +zvksh -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include <riscv_vector.h> + +vuint32mf2_t test_vgmul_vv_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) { + return __riscv_vgmul_vv_u32mf2_tu(vd, vs2, vl); +} + +vuint32m1_t test_vgmul_vv_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vgmul_vv_u32m1_tu(vd, vs2, vl); +} + +vuint32m2_t test_vgmul_vv_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vgmul_vv_u32m2_tu(vd, vs2, vl); +} + +vuint32m4_t test_vgmul_vv_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vgmul_vv_u32m4_tu(vd, vs2, vl); +} + +vuint32m8_t test_vgmul_vv_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { + return __riscv_vgmul_vv_u32m8_tu(vd, vs2, vl); +} diff --git a/auto-generated/vector-crypto/policy_funcs/llvm-api-tests/vrev8.c b/auto-generated/vector-crypto/policy_funcs/llvm-api-tests/vrev8.c new file mode 100644 index 000000000..61471ea81 --- /dev/null +++ b/auto-generated/vector-crypto/policy_funcs/llvm-api-tests/vrev8.c @@ -0,0 +1,365 @@ +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zvl512b \ +// RUN: -target-feature +zvbb \ +// RUN: -target-feature +zvbc \ +// RUN: -target-feature +zvkg \ +// RUN: -target-feature +zvkned \ +// RUN: -target-feature +zvknhb \ +// RUN: -target-feature +zvksed \ +// RUN: -target-feature +zvksh -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include <riscv_vector.h> + +vuint8mf8_t test_vrev8_v_u8mf8_tu(vuint8mf8_t vd, vuint8mf8_t vs2, size_t vl) { +
return __riscv_vrev8_v_u8mf8_tu(vd, vs2, vl); +} + +vuint8mf4_t test_vrev8_v_u8mf4_tu(vuint8mf4_t vd, vuint8mf4_t vs2, size_t vl) { + return __riscv_vrev8_v_u8mf4_tu(vd, vs2, vl); +} + +vuint8mf2_t test_vrev8_v_u8mf2_tu(vuint8mf2_t vd, vuint8mf2_t vs2, size_t vl) { + return __riscv_vrev8_v_u8mf2_tu(vd, vs2, vl); +} + +vuint8m1_t test_vrev8_v_u8m1_tu(vuint8m1_t vd, vuint8m1_t vs2, size_t vl) { + return __riscv_vrev8_v_u8m1_tu(vd, vs2, vl); +} + +vuint8m2_t test_vrev8_v_u8m2_tu(vuint8m2_t vd, vuint8m2_t vs2, size_t vl) { + return __riscv_vrev8_v_u8m2_tu(vd, vs2, vl); +} + +vuint8m4_t test_vrev8_v_u8m4_tu(vuint8m4_t vd, vuint8m4_t vs2, size_t vl) { + return __riscv_vrev8_v_u8m4_tu(vd, vs2, vl); +} + +vuint8m8_t test_vrev8_v_u8m8_tu(vuint8m8_t vd, vuint8m8_t vs2, size_t vl) { + return __riscv_vrev8_v_u8m8_tu(vd, vs2, vl); +} + +vuint16mf4_t test_vrev8_v_u16mf4_tu(vuint16mf4_t vd, vuint16mf4_t vs2, size_t vl) { + return __riscv_vrev8_v_u16mf4_tu(vd, vs2, vl); +} + +vuint16mf2_t test_vrev8_v_u16mf2_tu(vuint16mf2_t vd, vuint16mf2_t vs2, size_t vl) { + return __riscv_vrev8_v_u16mf2_tu(vd, vs2, vl); +} + +vuint16m1_t test_vrev8_v_u16m1_tu(vuint16m1_t vd, vuint16m1_t vs2, size_t vl) { + return __riscv_vrev8_v_u16m1_tu(vd, vs2, vl); +} + +vuint16m2_t test_vrev8_v_u16m2_tu(vuint16m2_t vd, vuint16m2_t vs2, size_t vl) { + return __riscv_vrev8_v_u16m2_tu(vd, vs2, vl); +} + +vuint16m4_t test_vrev8_v_u16m4_tu(vuint16m4_t vd, vuint16m4_t vs2, size_t vl) { + return __riscv_vrev8_v_u16m4_tu(vd, vs2, vl); +} + +vuint16m8_t test_vrev8_v_u16m8_tu(vuint16m8_t vd, vuint16m8_t vs2, size_t vl) { + return __riscv_vrev8_v_u16m8_tu(vd, vs2, vl); +} + +vuint32mf2_t test_vrev8_v_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) { + return __riscv_vrev8_v_u32mf2_tu(vd, vs2, vl); +} + +vuint32m1_t test_vrev8_v_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vrev8_v_u32m1_tu(vd, vs2, vl); +} + +vuint32m2_t test_vrev8_v_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vrev8_v_u32m2_tu(vd, vs2, vl); +} + +vuint32m4_t test_vrev8_v_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vrev8_v_u32m4_tu(vd, vs2, vl); +} + +vuint32m8_t test_vrev8_v_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { + return __riscv_vrev8_v_u32m8_tu(vd, vs2, vl); +} + +vuint64m1_t test_vrev8_v_u64m1_tu(vuint64m1_t vd, vuint64m1_t vs2, size_t vl) { + return __riscv_vrev8_v_u64m1_tu(vd, vs2, vl); +} + +vuint64m2_t test_vrev8_v_u64m2_tu(vuint64m2_t vd, vuint64m2_t vs2, size_t vl) { + return __riscv_vrev8_v_u64m2_tu(vd, vs2, vl); +} + +vuint64m4_t test_vrev8_v_u64m4_tu(vuint64m4_t vd, vuint64m4_t vs2, size_t vl) { + return __riscv_vrev8_v_u64m4_tu(vd, vs2, vl); +} + +vuint64m8_t test_vrev8_v_u64m8_tu(vuint64m8_t vd, vuint64m8_t vs2, size_t vl) { + return __riscv_vrev8_v_u64m8_tu(vd, vs2, vl); +} + +vuint8mf8_t test_vrev8_v_u8mf8_tum(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, size_t vl) { + return __riscv_vrev8_v_u8mf8_tum(vm, vd, vs2, vl); +} + +vuint8mf4_t test_vrev8_v_u8mf4_tum(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, size_t vl) { + return __riscv_vrev8_v_u8mf4_tum(vm, vd, vs2, vl); +} + +vuint8mf2_t test_vrev8_v_u8mf2_tum(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, size_t vl) { + return __riscv_vrev8_v_u8mf2_tum(vm, vd, vs2, vl); +} + +vuint8m1_t test_vrev8_v_u8m1_tum(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, size_t vl) { + return __riscv_vrev8_v_u8m1_tum(vm, vd, vs2, vl); +} + +vuint8m2_t test_vrev8_v_u8m2_tum(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, size_t 
vl) { + return __riscv_vrev8_v_u8m2_tum(vm, vd, vs2, vl); +} + +vuint8m4_t test_vrev8_v_u8m4_tum(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, size_t vl) { + return __riscv_vrev8_v_u8m4_tum(vm, vd, vs2, vl); +} + +vuint8m8_t test_vrev8_v_u8m8_tum(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, size_t vl) { + return __riscv_vrev8_v_u8m8_tum(vm, vd, vs2, vl); +} + +vuint16mf4_t test_vrev8_v_u16mf4_tum(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, size_t vl) { + return __riscv_vrev8_v_u16mf4_tum(vm, vd, vs2, vl); +} + +vuint16mf2_t test_vrev8_v_u16mf2_tum(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, size_t vl) { + return __riscv_vrev8_v_u16mf2_tum(vm, vd, vs2, vl); +} + +vuint16m1_t test_vrev8_v_u16m1_tum(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, size_t vl) { + return __riscv_vrev8_v_u16m1_tum(vm, vd, vs2, vl); +} + +vuint16m2_t test_vrev8_v_u16m2_tum(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, size_t vl) { + return __riscv_vrev8_v_u16m2_tum(vm, vd, vs2, vl); +} + +vuint16m4_t test_vrev8_v_u16m4_tum(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, size_t vl) { + return __riscv_vrev8_v_u16m4_tum(vm, vd, vs2, vl); +} + +vuint16m8_t test_vrev8_v_u16m8_tum(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, size_t vl) { + return __riscv_vrev8_v_u16m8_tum(vm, vd, vs2, vl); +} + +vuint32mf2_t test_vrev8_v_u32mf2_tum(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) { + return __riscv_vrev8_v_u32mf2_tum(vm, vd, vs2, vl); +} + +vuint32m1_t test_vrev8_v_u32m1_tum(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vrev8_v_u32m1_tum(vm, vd, vs2, vl); +} + +vuint32m2_t test_vrev8_v_u32m2_tum(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vrev8_v_u32m2_tum(vm, vd, vs2, vl); +} + +vuint32m4_t test_vrev8_v_u32m4_tum(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vrev8_v_u32m4_tum(vm, vd, vs2, vl); +} + +vuint32m8_t test_vrev8_v_u32m8_tum(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { + return __riscv_vrev8_v_u32m8_tum(vm, vd, vs2, vl); +} + +vuint64m1_t test_vrev8_v_u64m1_tum(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, size_t vl) { + return __riscv_vrev8_v_u64m1_tum(vm, vd, vs2, vl); +} + +vuint64m2_t test_vrev8_v_u64m2_tum(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, size_t vl) { + return __riscv_vrev8_v_u64m2_tum(vm, vd, vs2, vl); +} + +vuint64m4_t test_vrev8_v_u64m4_tum(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, size_t vl) { + return __riscv_vrev8_v_u64m4_tum(vm, vd, vs2, vl); +} + +vuint64m8_t test_vrev8_v_u64m8_tum(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, size_t vl) { + return __riscv_vrev8_v_u64m8_tum(vm, vd, vs2, vl); +} + +vuint8mf8_t test_vrev8_v_u8mf8_tumu(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, size_t vl) { + return __riscv_vrev8_v_u8mf8_tumu(vm, vd, vs2, vl); +} + +vuint8mf4_t test_vrev8_v_u8mf4_tumu(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, size_t vl) { + return __riscv_vrev8_v_u8mf4_tumu(vm, vd, vs2, vl); +} + +vuint8mf2_t test_vrev8_v_u8mf2_tumu(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, size_t vl) { + return __riscv_vrev8_v_u8mf2_tumu(vm, vd, vs2, vl); +} + +vuint8m1_t test_vrev8_v_u8m1_tumu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, size_t vl) { + return __riscv_vrev8_v_u8m1_tumu(vm, vd, vs2, vl); +} + +vuint8m2_t test_vrev8_v_u8m2_tumu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, size_t vl) { + return __riscv_vrev8_v_u8m2_tumu(vm, vd, vs2, vl); +} + +vuint8m4_t test_vrev8_v_u8m4_tumu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, size_t vl) { + return 
__riscv_vrev8_v_u8m4_tumu(vm, vd, vs2, vl); +} + +vuint8m8_t test_vrev8_v_u8m8_tumu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, size_t vl) { + return __riscv_vrev8_v_u8m8_tumu(vm, vd, vs2, vl); +} + +vuint16mf4_t test_vrev8_v_u16mf4_tumu(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, size_t vl) { + return __riscv_vrev8_v_u16mf4_tumu(vm, vd, vs2, vl); +} + +vuint16mf2_t test_vrev8_v_u16mf2_tumu(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, size_t vl) { + return __riscv_vrev8_v_u16mf2_tumu(vm, vd, vs2, vl); +} + +vuint16m1_t test_vrev8_v_u16m1_tumu(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, size_t vl) { + return __riscv_vrev8_v_u16m1_tumu(vm, vd, vs2, vl); +} + +vuint16m2_t test_vrev8_v_u16m2_tumu(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, size_t vl) { + return __riscv_vrev8_v_u16m2_tumu(vm, vd, vs2, vl); +} + +vuint16m4_t test_vrev8_v_u16m4_tumu(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, size_t vl) { + return __riscv_vrev8_v_u16m4_tumu(vm, vd, vs2, vl); +} + +vuint16m8_t test_vrev8_v_u16m8_tumu(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, size_t vl) { + return __riscv_vrev8_v_u16m8_tumu(vm, vd, vs2, vl); +} + +vuint32mf2_t test_vrev8_v_u32mf2_tumu(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) { + return __riscv_vrev8_v_u32mf2_tumu(vm, vd, vs2, vl); +} + +vuint32m1_t test_vrev8_v_u32m1_tumu(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vrev8_v_u32m1_tumu(vm, vd, vs2, vl); +} + +vuint32m2_t test_vrev8_v_u32m2_tumu(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vrev8_v_u32m2_tumu(vm, vd, vs2, vl); +} + +vuint32m4_t test_vrev8_v_u32m4_tumu(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vrev8_v_u32m4_tumu(vm, vd, vs2, vl); +} + +vuint32m8_t test_vrev8_v_u32m8_tumu(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { + return __riscv_vrev8_v_u32m8_tumu(vm, vd, vs2, vl); +} + +vuint64m1_t test_vrev8_v_u64m1_tumu(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, size_t vl) { + return __riscv_vrev8_v_u64m1_tumu(vm, vd, vs2, vl); +} + +vuint64m2_t test_vrev8_v_u64m2_tumu(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, size_t vl) { + return __riscv_vrev8_v_u64m2_tumu(vm, vd, vs2, vl); +} + +vuint64m4_t test_vrev8_v_u64m4_tumu(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, size_t vl) { + return __riscv_vrev8_v_u64m4_tumu(vm, vd, vs2, vl); +} + +vuint64m8_t test_vrev8_v_u64m8_tumu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, size_t vl) { + return __riscv_vrev8_v_u64m8_tumu(vm, vd, vs2, vl); +} + +vuint8mf8_t test_vrev8_v_u8mf8_mu(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, size_t vl) { + return __riscv_vrev8_v_u8mf8_mu(vm, vd, vs2, vl); +} + +vuint8mf4_t test_vrev8_v_u8mf4_mu(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, size_t vl) { + return __riscv_vrev8_v_u8mf4_mu(vm, vd, vs2, vl); +} + +vuint8mf2_t test_vrev8_v_u8mf2_mu(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, size_t vl) { + return __riscv_vrev8_v_u8mf2_mu(vm, vd, vs2, vl); +} + +vuint8m1_t test_vrev8_v_u8m1_mu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, size_t vl) { + return __riscv_vrev8_v_u8m1_mu(vm, vd, vs2, vl); +} + +vuint8m2_t test_vrev8_v_u8m2_mu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, size_t vl) { + return __riscv_vrev8_v_u8m2_mu(vm, vd, vs2, vl); +} + +vuint8m4_t test_vrev8_v_u8m4_mu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, size_t vl) { + return __riscv_vrev8_v_u8m4_mu(vm, vd, vs2, vl); +} + +vuint8m8_t test_vrev8_v_u8m8_mu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, size_t vl) { + return 
__riscv_vrev8_v_u8m8_mu(vm, vd, vs2, vl); +} + +vuint16mf4_t test_vrev8_v_u16mf4_mu(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, size_t vl) { + return __riscv_vrev8_v_u16mf4_mu(vm, vd, vs2, vl); +} + +vuint16mf2_t test_vrev8_v_u16mf2_mu(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, size_t vl) { + return __riscv_vrev8_v_u16mf2_mu(vm, vd, vs2, vl); +} + +vuint16m1_t test_vrev8_v_u16m1_mu(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, size_t vl) { + return __riscv_vrev8_v_u16m1_mu(vm, vd, vs2, vl); +} + +vuint16m2_t test_vrev8_v_u16m2_mu(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, size_t vl) { + return __riscv_vrev8_v_u16m2_mu(vm, vd, vs2, vl); +} + +vuint16m4_t test_vrev8_v_u16m4_mu(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, size_t vl) { + return __riscv_vrev8_v_u16m4_mu(vm, vd, vs2, vl); +} + +vuint16m8_t test_vrev8_v_u16m8_mu(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, size_t vl) { + return __riscv_vrev8_v_u16m8_mu(vm, vd, vs2, vl); +} + +vuint32mf2_t test_vrev8_v_u32mf2_mu(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) { + return __riscv_vrev8_v_u32mf2_mu(vm, vd, vs2, vl); +} + +vuint32m1_t test_vrev8_v_u32m1_mu(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vrev8_v_u32m1_mu(vm, vd, vs2, vl); +} + +vuint32m2_t test_vrev8_v_u32m2_mu(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vrev8_v_u32m2_mu(vm, vd, vs2, vl); +} + +vuint32m4_t test_vrev8_v_u32m4_mu(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vrev8_v_u32m4_mu(vm, vd, vs2, vl); +} + +vuint32m8_t test_vrev8_v_u32m8_mu(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { + return __riscv_vrev8_v_u32m8_mu(vm, vd, vs2, vl); +} + +vuint64m1_t test_vrev8_v_u64m1_mu(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, size_t vl) { + return __riscv_vrev8_v_u64m1_mu(vm, vd, vs2, vl); +} + +vuint64m2_t test_vrev8_v_u64m2_mu(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, size_t vl) { + return __riscv_vrev8_v_u64m2_mu(vm, vd, vs2, vl); +} + +vuint64m4_t test_vrev8_v_u64m4_mu(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, size_t vl) { + return __riscv_vrev8_v_u64m4_mu(vm, vd, vs2, vl); +} + +vuint64m8_t test_vrev8_v_u64m8_mu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, size_t vl) { + return __riscv_vrev8_v_u64m8_mu(vm, vd, vs2, vl); +} diff --git a/auto-generated/vector-crypto/policy_funcs/llvm-api-tests/vrol.c b/auto-generated/vector-crypto/policy_funcs/llvm-api-tests/vrol.c new file mode 100644 index 000000000..0dacd5b3e --- /dev/null +++ b/auto-generated/vector-crypto/policy_funcs/llvm-api-tests/vrol.c @@ -0,0 +1,717 @@ +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zvl512b \ +// RUN: -target-feature +zvbb \ +// RUN: -target-feature +zvbc \ +// RUN: -target-feature +zvkg \ +// RUN: -target-feature +zvkned \ +// RUN: -target-feature +zvknhb \ +// RUN: -target-feature +zvksed \ +// RUN: -target-feature +zvksh -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include <riscv_vector.h> + +vuint8mf8_t test_vrol_vv_u8mf8_tu(vuint8mf8_t vd, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { + return __riscv_vrol_vv_u8mf8_tu(vd, vs2, vs1, vl); +} + +vuint8mf8_t test_vrol_vx_u8mf8_tu(vuint8mf8_t vd, vuint8mf8_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u8mf8_tu(vd, vs2, rs1, vl); +} + +vuint8mf4_t test_vrol_vv_u8mf4_tu(vuint8mf4_t vd, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { + return
__riscv_vrol_vv_u8mf4_tu(vd, vs2, vs1, vl); +} + +vuint8mf4_t test_vrol_vx_u8mf4_tu(vuint8mf4_t vd, vuint8mf4_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u8mf4_tu(vd, vs2, rs1, vl); +} + +vuint8mf2_t test_vrol_vv_u8mf2_tu(vuint8mf2_t vd, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { + return __riscv_vrol_vv_u8mf2_tu(vd, vs2, vs1, vl); +} + +vuint8mf2_t test_vrol_vx_u8mf2_tu(vuint8mf2_t vd, vuint8mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u8mf2_tu(vd, vs2, rs1, vl); +} + +vuint8m1_t test_vrol_vv_u8m1_tu(vuint8m1_t vd, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vrol_vv_u8m1_tu(vd, vs2, vs1, vl); +} + +vuint8m1_t test_vrol_vx_u8m1_tu(vuint8m1_t vd, vuint8m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u8m1_tu(vd, vs2, rs1, vl); +} + +vuint8m2_t test_vrol_vv_u8m2_tu(vuint8m2_t vd, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) { + return __riscv_vrol_vv_u8m2_tu(vd, vs2, vs1, vl); +} + +vuint8m2_t test_vrol_vx_u8m2_tu(vuint8m2_t vd, vuint8m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u8m2_tu(vd, vs2, rs1, vl); +} + +vuint8m4_t test_vrol_vv_u8m4_tu(vuint8m4_t vd, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) { + return __riscv_vrol_vv_u8m4_tu(vd, vs2, vs1, vl); +} + +vuint8m4_t test_vrol_vx_u8m4_tu(vuint8m4_t vd, vuint8m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u8m4_tu(vd, vs2, rs1, vl); +} + +vuint8m8_t test_vrol_vv_u8m8_tu(vuint8m8_t vd, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) { + return __riscv_vrol_vv_u8m8_tu(vd, vs2, vs1, vl); +} + +vuint8m8_t test_vrol_vx_u8m8_tu(vuint8m8_t vd, vuint8m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u8m8_tu(vd, vs2, rs1, vl); +} + +vuint16mf4_t test_vrol_vv_u16mf4_tu(vuint16mf4_t vd, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { + return __riscv_vrol_vv_u16mf4_tu(vd, vs2, vs1, vl); +} + +vuint16mf4_t test_vrol_vx_u16mf4_tu(vuint16mf4_t vd, vuint16mf4_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u16mf4_tu(vd, vs2, rs1, vl); +} + +vuint16mf2_t test_vrol_vv_u16mf2_tu(vuint16mf2_t vd, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { + return __riscv_vrol_vv_u16mf2_tu(vd, vs2, vs1, vl); +} + +vuint16mf2_t test_vrol_vx_u16mf2_tu(vuint16mf2_t vd, vuint16mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u16mf2_tu(vd, vs2, rs1, vl); +} + +vuint16m1_t test_vrol_vv_u16m1_tu(vuint16m1_t vd, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vrol_vv_u16m1_tu(vd, vs2, vs1, vl); +} + +vuint16m1_t test_vrol_vx_u16m1_tu(vuint16m1_t vd, vuint16m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u16m1_tu(vd, vs2, rs1, vl); +} + +vuint16m2_t test_vrol_vv_u16m2_tu(vuint16m2_t vd, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { + return __riscv_vrol_vv_u16m2_tu(vd, vs2, vs1, vl); +} + +vuint16m2_t test_vrol_vx_u16m2_tu(vuint16m2_t vd, vuint16m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u16m2_tu(vd, vs2, rs1, vl); +} + +vuint16m4_t test_vrol_vv_u16m4_tu(vuint16m4_t vd, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { + return __riscv_vrol_vv_u16m4_tu(vd, vs2, vs1, vl); +} + +vuint16m4_t test_vrol_vx_u16m4_tu(vuint16m4_t vd, vuint16m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u16m4_tu(vd, vs2, rs1, vl); +} + +vuint16m8_t test_vrol_vv_u16m8_tu(vuint16m8_t vd, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) { + return __riscv_vrol_vv_u16m8_tu(vd, vs2, vs1, vl); +} + +vuint16m8_t test_vrol_vx_u16m8_tu(vuint16m8_t vd, vuint16m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u16m8_tu(vd, vs2, rs1, vl); +} + 
+vuint32mf2_t test_vrol_vv_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { + return __riscv_vrol_vv_u32mf2_tu(vd, vs2, vs1, vl); +} + +vuint32mf2_t test_vrol_vx_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u32mf2_tu(vd, vs2, rs1, vl); +} + +vuint32m1_t test_vrol_vv_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vrol_vv_u32m1_tu(vd, vs2, vs1, vl); +} + +vuint32m1_t test_vrol_vx_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u32m1_tu(vd, vs2, rs1, vl); +} + +vuint32m2_t test_vrol_vv_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { + return __riscv_vrol_vv_u32m2_tu(vd, vs2, vs1, vl); +} + +vuint32m2_t test_vrol_vx_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u32m2_tu(vd, vs2, rs1, vl); +} + +vuint32m4_t test_vrol_vv_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { + return __riscv_vrol_vv_u32m4_tu(vd, vs2, vs1, vl); +} + +vuint32m4_t test_vrol_vx_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u32m4_tu(vd, vs2, rs1, vl); +} + +vuint32m8_t test_vrol_vv_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) { + return __riscv_vrol_vv_u32m8_tu(vd, vs2, vs1, vl); +} + +vuint32m8_t test_vrol_vx_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u32m8_tu(vd, vs2, rs1, vl); +} + +vuint64m1_t test_vrol_vv_u64m1_tu(vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vrol_vv_u64m1_tu(vd, vs2, vs1, vl); +} + +vuint64m1_t test_vrol_vx_u64m1_tu(vuint64m1_t vd, vuint64m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u64m1_tu(vd, vs2, rs1, vl); +} + +vuint64m2_t test_vrol_vv_u64m2_tu(vuint64m2_t vd, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { + return __riscv_vrol_vv_u64m2_tu(vd, vs2, vs1, vl); +} + +vuint64m2_t test_vrol_vx_u64m2_tu(vuint64m2_t vd, vuint64m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u64m2_tu(vd, vs2, rs1, vl); +} + +vuint64m4_t test_vrol_vv_u64m4_tu(vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { + return __riscv_vrol_vv_u64m4_tu(vd, vs2, vs1, vl); +} + +vuint64m4_t test_vrol_vx_u64m4_tu(vuint64m4_t vd, vuint64m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u64m4_tu(vd, vs2, rs1, vl); +} + +vuint64m8_t test_vrol_vv_u64m8_tu(vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { + return __riscv_vrol_vv_u64m8_tu(vd, vs2, vs1, vl); +} + +vuint64m8_t test_vrol_vx_u64m8_tu(vuint64m8_t vd, vuint64m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u64m8_tu(vd, vs2, rs1, vl); +} + +vuint8mf8_t test_vrol_vv_u8mf8_tum(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { + return __riscv_vrol_vv_u8mf8_tum(vm, vd, vs2, vs1, vl); +} + +vuint8mf8_t test_vrol_vx_u8mf8_tum(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u8mf8_tum(vm, vd, vs2, rs1, vl); +} + +vuint8mf4_t test_vrol_vv_u8mf4_tum(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { + return __riscv_vrol_vv_u8mf4_tum(vm, vd, vs2, vs1, vl); +} + +vuint8mf4_t test_vrol_vx_u8mf4_tum(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u8mf4_tum(vm, vd, vs2, rs1, vl); +} + +vuint8mf2_t test_vrol_vv_u8mf2_tum(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, 
vuint8mf2_t vs1, size_t vl) { + return __riscv_vrol_vv_u8mf2_tum(vm, vd, vs2, vs1, vl); +} + +vuint8mf2_t test_vrol_vx_u8mf2_tum(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u8mf2_tum(vm, vd, vs2, rs1, vl); +} + +vuint8m1_t test_vrol_vv_u8m1_tum(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vrol_vv_u8m1_tum(vm, vd, vs2, vs1, vl); +} + +vuint8m1_t test_vrol_vx_u8m1_tum(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u8m1_tum(vm, vd, vs2, rs1, vl); +} + +vuint8m2_t test_vrol_vv_u8m2_tum(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) { + return __riscv_vrol_vv_u8m2_tum(vm, vd, vs2, vs1, vl); +} + +vuint8m2_t test_vrol_vx_u8m2_tum(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u8m2_tum(vm, vd, vs2, rs1, vl); +} + +vuint8m4_t test_vrol_vv_u8m4_tum(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) { + return __riscv_vrol_vv_u8m4_tum(vm, vd, vs2, vs1, vl); +} + +vuint8m4_t test_vrol_vx_u8m4_tum(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u8m4_tum(vm, vd, vs2, rs1, vl); +} + +vuint8m8_t test_vrol_vv_u8m8_tum(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) { + return __riscv_vrol_vv_u8m8_tum(vm, vd, vs2, vs1, vl); +} + +vuint8m8_t test_vrol_vx_u8m8_tum(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u8m8_tum(vm, vd, vs2, rs1, vl); +} + +vuint16mf4_t test_vrol_vv_u16mf4_tum(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { + return __riscv_vrol_vv_u16mf4_tum(vm, vd, vs2, vs1, vl); +} + +vuint16mf4_t test_vrol_vx_u16mf4_tum(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u16mf4_tum(vm, vd, vs2, rs1, vl); +} + +vuint16mf2_t test_vrol_vv_u16mf2_tum(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { + return __riscv_vrol_vv_u16mf2_tum(vm, vd, vs2, vs1, vl); +} + +vuint16mf2_t test_vrol_vx_u16mf2_tum(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u16mf2_tum(vm, vd, vs2, rs1, vl); +} + +vuint16m1_t test_vrol_vv_u16m1_tum(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vrol_vv_u16m1_tum(vm, vd, vs2, vs1, vl); +} + +vuint16m1_t test_vrol_vx_u16m1_tum(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u16m1_tum(vm, vd, vs2, rs1, vl); +} + +vuint16m2_t test_vrol_vv_u16m2_tum(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { + return __riscv_vrol_vv_u16m2_tum(vm, vd, vs2, vs1, vl); +} + +vuint16m2_t test_vrol_vx_u16m2_tum(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u16m2_tum(vm, vd, vs2, rs1, vl); +} + +vuint16m4_t test_vrol_vv_u16m4_tum(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { + return __riscv_vrol_vv_u16m4_tum(vm, vd, vs2, vs1, vl); +} + +vuint16m4_t test_vrol_vx_u16m4_tum(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u16m4_tum(vm, vd, vs2, rs1, vl); +} + +vuint16m8_t test_vrol_vv_u16m8_tum(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) { + return __riscv_vrol_vv_u16m8_tum(vm, vd, vs2, vs1, vl); +} + +vuint16m8_t 
test_vrol_vx_u16m8_tum(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u16m8_tum(vm, vd, vs2, rs1, vl); +} + +vuint32mf2_t test_vrol_vv_u32mf2_tum(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { + return __riscv_vrol_vv_u32mf2_tum(vm, vd, vs2, vs1, vl); +} + +vuint32mf2_t test_vrol_vx_u32mf2_tum(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u32mf2_tum(vm, vd, vs2, rs1, vl); +} + +vuint32m1_t test_vrol_vv_u32m1_tum(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vrol_vv_u32m1_tum(vm, vd, vs2, vs1, vl); +} + +vuint32m1_t test_vrol_vx_u32m1_tum(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u32m1_tum(vm, vd, vs2, rs1, vl); +} + +vuint32m2_t test_vrol_vv_u32m2_tum(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { + return __riscv_vrol_vv_u32m2_tum(vm, vd, vs2, vs1, vl); +} + +vuint32m2_t test_vrol_vx_u32m2_tum(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u32m2_tum(vm, vd, vs2, rs1, vl); +} + +vuint32m4_t test_vrol_vv_u32m4_tum(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { + return __riscv_vrol_vv_u32m4_tum(vm, vd, vs2, vs1, vl); +} + +vuint32m4_t test_vrol_vx_u32m4_tum(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u32m4_tum(vm, vd, vs2, rs1, vl); +} + +vuint32m8_t test_vrol_vv_u32m8_tum(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) { + return __riscv_vrol_vv_u32m8_tum(vm, vd, vs2, vs1, vl); +} + +vuint32m8_t test_vrol_vx_u32m8_tum(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u32m8_tum(vm, vd, vs2, rs1, vl); +} + +vuint64m1_t test_vrol_vv_u64m1_tum(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vrol_vv_u64m1_tum(vm, vd, vs2, vs1, vl); +} + +vuint64m1_t test_vrol_vx_u64m1_tum(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u64m1_tum(vm, vd, vs2, rs1, vl); +} + +vuint64m2_t test_vrol_vv_u64m2_tum(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { + return __riscv_vrol_vv_u64m2_tum(vm, vd, vs2, vs1, vl); +} + +vuint64m2_t test_vrol_vx_u64m2_tum(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u64m2_tum(vm, vd, vs2, rs1, vl); +} + +vuint64m4_t test_vrol_vv_u64m4_tum(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { + return __riscv_vrol_vv_u64m4_tum(vm, vd, vs2, vs1, vl); +} + +vuint64m4_t test_vrol_vx_u64m4_tum(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u64m4_tum(vm, vd, vs2, rs1, vl); +} + +vuint64m8_t test_vrol_vv_u64m8_tum(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { + return __riscv_vrol_vv_u64m8_tum(vm, vd, vs2, vs1, vl); +} + +vuint64m8_t test_vrol_vx_u64m8_tum(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u64m8_tum(vm, vd, vs2, rs1, vl); +} + +vuint8mf8_t test_vrol_vv_u8mf8_tumu(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { + return __riscv_vrol_vv_u8mf8_tumu(vm, vd, vs2, vs1, vl); +} + +vuint8mf8_t test_vrol_vx_u8mf8_tumu(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, size_t 
rs1, size_t vl) { + return __riscv_vrol_vx_u8mf8_tumu(vm, vd, vs2, rs1, vl); +} + +vuint8mf4_t test_vrol_vv_u8mf4_tumu(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { + return __riscv_vrol_vv_u8mf4_tumu(vm, vd, vs2, vs1, vl); +} + +vuint8mf4_t test_vrol_vx_u8mf4_tumu(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u8mf4_tumu(vm, vd, vs2, rs1, vl); +} + +vuint8mf2_t test_vrol_vv_u8mf2_tumu(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { + return __riscv_vrol_vv_u8mf2_tumu(vm, vd, vs2, vs1, vl); +} + +vuint8mf2_t test_vrol_vx_u8mf2_tumu(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u8mf2_tumu(vm, vd, vs2, rs1, vl); +} + +vuint8m1_t test_vrol_vv_u8m1_tumu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vrol_vv_u8m1_tumu(vm, vd, vs2, vs1, vl); +} + +vuint8m1_t test_vrol_vx_u8m1_tumu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u8m1_tumu(vm, vd, vs2, rs1, vl); +} + +vuint8m2_t test_vrol_vv_u8m2_tumu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) { + return __riscv_vrol_vv_u8m2_tumu(vm, vd, vs2, vs1, vl); +} + +vuint8m2_t test_vrol_vx_u8m2_tumu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u8m2_tumu(vm, vd, vs2, rs1, vl); +} + +vuint8m4_t test_vrol_vv_u8m4_tumu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) { + return __riscv_vrol_vv_u8m4_tumu(vm, vd, vs2, vs1, vl); +} + +vuint8m4_t test_vrol_vx_u8m4_tumu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u8m4_tumu(vm, vd, vs2, rs1, vl); +} + +vuint8m8_t test_vrol_vv_u8m8_tumu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) { + return __riscv_vrol_vv_u8m8_tumu(vm, vd, vs2, vs1, vl); +} + +vuint8m8_t test_vrol_vx_u8m8_tumu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u8m8_tumu(vm, vd, vs2, rs1, vl); +} + +vuint16mf4_t test_vrol_vv_u16mf4_tumu(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { + return __riscv_vrol_vv_u16mf4_tumu(vm, vd, vs2, vs1, vl); +} + +vuint16mf4_t test_vrol_vx_u16mf4_tumu(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u16mf4_tumu(vm, vd, vs2, rs1, vl); +} + +vuint16mf2_t test_vrol_vv_u16mf2_tumu(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { + return __riscv_vrol_vv_u16mf2_tumu(vm, vd, vs2, vs1, vl); +} + +vuint16mf2_t test_vrol_vx_u16mf2_tumu(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u16mf2_tumu(vm, vd, vs2, rs1, vl); +} + +vuint16m1_t test_vrol_vv_u16m1_tumu(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vrol_vv_u16m1_tumu(vm, vd, vs2, vs1, vl); +} + +vuint16m1_t test_vrol_vx_u16m1_tumu(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u16m1_tumu(vm, vd, vs2, rs1, vl); +} + +vuint16m2_t test_vrol_vv_u16m2_tumu(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { + return __riscv_vrol_vv_u16m2_tumu(vm, vd, vs2, vs1, vl); +} + +vuint16m2_t test_vrol_vx_u16m2_tumu(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u16m2_tumu(vm, vd, vs2, rs1, vl); 
+} + +vuint16m4_t test_vrol_vv_u16m4_tumu(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { + return __riscv_vrol_vv_u16m4_tumu(vm, vd, vs2, vs1, vl); +} + +vuint16m4_t test_vrol_vx_u16m4_tumu(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u16m4_tumu(vm, vd, vs2, rs1, vl); +} + +vuint16m8_t test_vrol_vv_u16m8_tumu(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) { + return __riscv_vrol_vv_u16m8_tumu(vm, vd, vs2, vs1, vl); +} + +vuint16m8_t test_vrol_vx_u16m8_tumu(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u16m8_tumu(vm, vd, vs2, rs1, vl); +} + +vuint32mf2_t test_vrol_vv_u32mf2_tumu(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { + return __riscv_vrol_vv_u32mf2_tumu(vm, vd, vs2, vs1, vl); +} + +vuint32mf2_t test_vrol_vx_u32mf2_tumu(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u32mf2_tumu(vm, vd, vs2, rs1, vl); +} + +vuint32m1_t test_vrol_vv_u32m1_tumu(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vrol_vv_u32m1_tumu(vm, vd, vs2, vs1, vl); +} + +vuint32m1_t test_vrol_vx_u32m1_tumu(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u32m1_tumu(vm, vd, vs2, rs1, vl); +} + +vuint32m2_t test_vrol_vv_u32m2_tumu(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { + return __riscv_vrol_vv_u32m2_tumu(vm, vd, vs2, vs1, vl); +} + +vuint32m2_t test_vrol_vx_u32m2_tumu(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u32m2_tumu(vm, vd, vs2, rs1, vl); +} + +vuint32m4_t test_vrol_vv_u32m4_tumu(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { + return __riscv_vrol_vv_u32m4_tumu(vm, vd, vs2, vs1, vl); +} + +vuint32m4_t test_vrol_vx_u32m4_tumu(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u32m4_tumu(vm, vd, vs2, rs1, vl); +} + +vuint32m8_t test_vrol_vv_u32m8_tumu(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) { + return __riscv_vrol_vv_u32m8_tumu(vm, vd, vs2, vs1, vl); +} + +vuint32m8_t test_vrol_vx_u32m8_tumu(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u32m8_tumu(vm, vd, vs2, rs1, vl); +} + +vuint64m1_t test_vrol_vv_u64m1_tumu(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vrol_vv_u64m1_tumu(vm, vd, vs2, vs1, vl); +} + +vuint64m1_t test_vrol_vx_u64m1_tumu(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u64m1_tumu(vm, vd, vs2, rs1, vl); +} + +vuint64m2_t test_vrol_vv_u64m2_tumu(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { + return __riscv_vrol_vv_u64m2_tumu(vm, vd, vs2, vs1, vl); +} + +vuint64m2_t test_vrol_vx_u64m2_tumu(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u64m2_tumu(vm, vd, vs2, rs1, vl); +} + +vuint64m4_t test_vrol_vv_u64m4_tumu(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { + return __riscv_vrol_vv_u64m4_tumu(vm, vd, vs2, vs1, vl); +} + +vuint64m4_t test_vrol_vx_u64m4_tumu(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u64m4_tumu(vm, vd, vs2, rs1, vl); +} + +vuint64m8_t 
test_vrol_vv_u64m8_tumu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { + return __riscv_vrol_vv_u64m8_tumu(vm, vd, vs2, vs1, vl); +} + +vuint64m8_t test_vrol_vx_u64m8_tumu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u64m8_tumu(vm, vd, vs2, rs1, vl); +} + +vuint8mf8_t test_vrol_vv_u8mf8_mu(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { + return __riscv_vrol_vv_u8mf8_mu(vm, vd, vs2, vs1, vl); +} + +vuint8mf8_t test_vrol_vx_u8mf8_mu(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u8mf8_mu(vm, vd, vs2, rs1, vl); +} + +vuint8mf4_t test_vrol_vv_u8mf4_mu(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { + return __riscv_vrol_vv_u8mf4_mu(vm, vd, vs2, vs1, vl); +} + +vuint8mf4_t test_vrol_vx_u8mf4_mu(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u8mf4_mu(vm, vd, vs2, rs1, vl); +} + +vuint8mf2_t test_vrol_vv_u8mf2_mu(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { + return __riscv_vrol_vv_u8mf2_mu(vm, vd, vs2, vs1, vl); +} + +vuint8mf2_t test_vrol_vx_u8mf2_mu(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u8mf2_mu(vm, vd, vs2, rs1, vl); +} + +vuint8m1_t test_vrol_vv_u8m1_mu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vrol_vv_u8m1_mu(vm, vd, vs2, vs1, vl); +} + +vuint8m1_t test_vrol_vx_u8m1_mu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u8m1_mu(vm, vd, vs2, rs1, vl); +} + +vuint8m2_t test_vrol_vv_u8m2_mu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) { + return __riscv_vrol_vv_u8m2_mu(vm, vd, vs2, vs1, vl); +} + +vuint8m2_t test_vrol_vx_u8m2_mu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u8m2_mu(vm, vd, vs2, rs1, vl); +} + +vuint8m4_t test_vrol_vv_u8m4_mu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) { + return __riscv_vrol_vv_u8m4_mu(vm, vd, vs2, vs1, vl); +} + +vuint8m4_t test_vrol_vx_u8m4_mu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u8m4_mu(vm, vd, vs2, rs1, vl); +} + +vuint8m8_t test_vrol_vv_u8m8_mu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) { + return __riscv_vrol_vv_u8m8_mu(vm, vd, vs2, vs1, vl); +} + +vuint8m8_t test_vrol_vx_u8m8_mu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u8m8_mu(vm, vd, vs2, rs1, vl); +} + +vuint16mf4_t test_vrol_vv_u16mf4_mu(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { + return __riscv_vrol_vv_u16mf4_mu(vm, vd, vs2, vs1, vl); +} + +vuint16mf4_t test_vrol_vx_u16mf4_mu(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u16mf4_mu(vm, vd, vs2, rs1, vl); +} + +vuint16mf2_t test_vrol_vv_u16mf2_mu(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { + return __riscv_vrol_vv_u16mf2_mu(vm, vd, vs2, vs1, vl); +} + +vuint16mf2_t test_vrol_vx_u16mf2_mu(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u16mf2_mu(vm, vd, vs2, rs1, vl); +} + +vuint16m1_t test_vrol_vv_u16m1_mu(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vrol_vv_u16m1_mu(vm, vd, vs2, 
vs1, vl); +} + +vuint16m1_t test_vrol_vx_u16m1_mu(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u16m1_mu(vm, vd, vs2, rs1, vl); +} + +vuint16m2_t test_vrol_vv_u16m2_mu(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { + return __riscv_vrol_vv_u16m2_mu(vm, vd, vs2, vs1, vl); +} + +vuint16m2_t test_vrol_vx_u16m2_mu(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u16m2_mu(vm, vd, vs2, rs1, vl); +} + +vuint16m4_t test_vrol_vv_u16m4_mu(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { + return __riscv_vrol_vv_u16m4_mu(vm, vd, vs2, vs1, vl); +} + +vuint16m4_t test_vrol_vx_u16m4_mu(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u16m4_mu(vm, vd, vs2, rs1, vl); +} + +vuint16m8_t test_vrol_vv_u16m8_mu(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) { + return __riscv_vrol_vv_u16m8_mu(vm, vd, vs2, vs1, vl); +} + +vuint16m8_t test_vrol_vx_u16m8_mu(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u16m8_mu(vm, vd, vs2, rs1, vl); +} + +vuint32mf2_t test_vrol_vv_u32mf2_mu(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { + return __riscv_vrol_vv_u32mf2_mu(vm, vd, vs2, vs1, vl); +} + +vuint32mf2_t test_vrol_vx_u32mf2_mu(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u32mf2_mu(vm, vd, vs2, rs1, vl); +} + +vuint32m1_t test_vrol_vv_u32m1_mu(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vrol_vv_u32m1_mu(vm, vd, vs2, vs1, vl); +} + +vuint32m1_t test_vrol_vx_u32m1_mu(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u32m1_mu(vm, vd, vs2, rs1, vl); +} + +vuint32m2_t test_vrol_vv_u32m2_mu(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { + return __riscv_vrol_vv_u32m2_mu(vm, vd, vs2, vs1, vl); +} + +vuint32m2_t test_vrol_vx_u32m2_mu(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u32m2_mu(vm, vd, vs2, rs1, vl); +} + +vuint32m4_t test_vrol_vv_u32m4_mu(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { + return __riscv_vrol_vv_u32m4_mu(vm, vd, vs2, vs1, vl); +} + +vuint32m4_t test_vrol_vx_u32m4_mu(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u32m4_mu(vm, vd, vs2, rs1, vl); +} + +vuint32m8_t test_vrol_vv_u32m8_mu(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) { + return __riscv_vrol_vv_u32m8_mu(vm, vd, vs2, vs1, vl); +} + +vuint32m8_t test_vrol_vx_u32m8_mu(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u32m8_mu(vm, vd, vs2, rs1, vl); +} + +vuint64m1_t test_vrol_vv_u64m1_mu(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vrol_vv_u64m1_mu(vm, vd, vs2, vs1, vl); +} + +vuint64m1_t test_vrol_vx_u64m1_mu(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u64m1_mu(vm, vd, vs2, rs1, vl); +} + +vuint64m2_t test_vrol_vv_u64m2_mu(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { + return __riscv_vrol_vv_u64m2_mu(vm, vd, vs2, vs1, vl); +} + +vuint64m2_t test_vrol_vx_u64m2_mu(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, size_t rs1, size_t vl) { + 
return __riscv_vrol_vx_u64m2_mu(vm, vd, vs2, rs1, vl); +} + +vuint64m4_t test_vrol_vv_u64m4_mu(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { + return __riscv_vrol_vv_u64m4_mu(vm, vd, vs2, vs1, vl); +} + +vuint64m4_t test_vrol_vx_u64m4_mu(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u64m4_mu(vm, vd, vs2, rs1, vl); +} + +vuint64m8_t test_vrol_vv_u64m8_mu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { + return __riscv_vrol_vv_u64m8_mu(vm, vd, vs2, vs1, vl); +} + +vuint64m8_t test_vrol_vx_u64m8_mu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_vx_u64m8_mu(vm, vd, vs2, rs1, vl); +} diff --git a/auto-generated/vector-crypto/policy_funcs/llvm-api-tests/vror.c b/auto-generated/vector-crypto/policy_funcs/llvm-api-tests/vror.c new file mode 100644 index 000000000..c28fb02ee --- /dev/null +++ b/auto-generated/vector-crypto/policy_funcs/llvm-api-tests/vror.c @@ -0,0 +1,717 @@ +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zvl512b \ +// RUN: -target-feature +zvbb \ +// RUN: -target-feature +zvbc \ +// RUN: -target-feature +zvkg \ +// RUN: -target-feature +zvkned \ +// RUN: -target-feature +zvknhb \ +// RUN: -target-feature +zvksed \ +// RUN: -target-feature +zvksh -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include <riscv_vector.h> + +vuint8mf8_t test_vror_vv_u8mf8_tu(vuint8mf8_t vd, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { + return __riscv_vror_vv_u8mf8_tu(vd, vs2, vs1, vl); +} + +vuint8mf8_t test_vror_vx_u8mf8_tu(vuint8mf8_t vd, vuint8mf8_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u8mf8_tu(vd, vs2, rs1, vl); +} + +vuint8mf4_t test_vror_vv_u8mf4_tu(vuint8mf4_t vd, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { + return __riscv_vror_vv_u8mf4_tu(vd, vs2, vs1, vl); +} + +vuint8mf4_t test_vror_vx_u8mf4_tu(vuint8mf4_t vd, vuint8mf4_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u8mf4_tu(vd, vs2, rs1, vl); +} + +vuint8mf2_t test_vror_vv_u8mf2_tu(vuint8mf2_t vd, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { + return __riscv_vror_vv_u8mf2_tu(vd, vs2, vs1, vl); +} + +vuint8mf2_t test_vror_vx_u8mf2_tu(vuint8mf2_t vd, vuint8mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u8mf2_tu(vd, vs2, rs1, vl); +} + +vuint8m1_t test_vror_vv_u8m1_tu(vuint8m1_t vd, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vror_vv_u8m1_tu(vd, vs2, vs1, vl); +} + +vuint8m1_t test_vror_vx_u8m1_tu(vuint8m1_t vd, vuint8m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u8m1_tu(vd, vs2, rs1, vl); +} + +vuint8m2_t test_vror_vv_u8m2_tu(vuint8m2_t vd, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) { + return __riscv_vror_vv_u8m2_tu(vd, vs2, vs1, vl); +} + +vuint8m2_t test_vror_vx_u8m2_tu(vuint8m2_t vd, vuint8m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u8m2_tu(vd, vs2, rs1, vl); +} + +vuint8m4_t test_vror_vv_u8m4_tu(vuint8m4_t vd, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) { + return __riscv_vror_vv_u8m4_tu(vd, vs2, vs1, vl); +} + +vuint8m4_t test_vror_vx_u8m4_tu(vuint8m4_t vd, vuint8m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u8m4_tu(vd, vs2, rs1, vl); +} + +vuint8m8_t test_vror_vv_u8m8_tu(vuint8m8_t vd, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) { + return __riscv_vror_vv_u8m8_tu(vd, vs2, vs1, vl); +} + +vuint8m8_t test_vror_vx_u8m8_tu(vuint8m8_t vd, vuint8m8_t vs2,
size_t rs1, size_t vl) { + return __riscv_vror_vx_u8m8_tu(vd, vs2, rs1, vl); +} + +vuint16mf4_t test_vror_vv_u16mf4_tu(vuint16mf4_t vd, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { + return __riscv_vror_vv_u16mf4_tu(vd, vs2, vs1, vl); +} + +vuint16mf4_t test_vror_vx_u16mf4_tu(vuint16mf4_t vd, vuint16mf4_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u16mf4_tu(vd, vs2, rs1, vl); +} + +vuint16mf2_t test_vror_vv_u16mf2_tu(vuint16mf2_t vd, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { + return __riscv_vror_vv_u16mf2_tu(vd, vs2, vs1, vl); +} + +vuint16mf2_t test_vror_vx_u16mf2_tu(vuint16mf2_t vd, vuint16mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u16mf2_tu(vd, vs2, rs1, vl); +} + +vuint16m1_t test_vror_vv_u16m1_tu(vuint16m1_t vd, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vror_vv_u16m1_tu(vd, vs2, vs1, vl); +} + +vuint16m1_t test_vror_vx_u16m1_tu(vuint16m1_t vd, vuint16m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u16m1_tu(vd, vs2, rs1, vl); +} + +vuint16m2_t test_vror_vv_u16m2_tu(vuint16m2_t vd, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { + return __riscv_vror_vv_u16m2_tu(vd, vs2, vs1, vl); +} + +vuint16m2_t test_vror_vx_u16m2_tu(vuint16m2_t vd, vuint16m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u16m2_tu(vd, vs2, rs1, vl); +} + +vuint16m4_t test_vror_vv_u16m4_tu(vuint16m4_t vd, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { + return __riscv_vror_vv_u16m4_tu(vd, vs2, vs1, vl); +} + +vuint16m4_t test_vror_vx_u16m4_tu(vuint16m4_t vd, vuint16m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u16m4_tu(vd, vs2, rs1, vl); +} + +vuint16m8_t test_vror_vv_u16m8_tu(vuint16m8_t vd, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) { + return __riscv_vror_vv_u16m8_tu(vd, vs2, vs1, vl); +} + +vuint16m8_t test_vror_vx_u16m8_tu(vuint16m8_t vd, vuint16m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u16m8_tu(vd, vs2, rs1, vl); +} + +vuint32mf2_t test_vror_vv_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { + return __riscv_vror_vv_u32mf2_tu(vd, vs2, vs1, vl); +} + +vuint32mf2_t test_vror_vx_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u32mf2_tu(vd, vs2, rs1, vl); +} + +vuint32m1_t test_vror_vv_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vror_vv_u32m1_tu(vd, vs2, vs1, vl); +} + +vuint32m1_t test_vror_vx_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u32m1_tu(vd, vs2, rs1, vl); +} + +vuint32m2_t test_vror_vv_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { + return __riscv_vror_vv_u32m2_tu(vd, vs2, vs1, vl); +} + +vuint32m2_t test_vror_vx_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u32m2_tu(vd, vs2, rs1, vl); +} + +vuint32m4_t test_vror_vv_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { + return __riscv_vror_vv_u32m4_tu(vd, vs2, vs1, vl); +} + +vuint32m4_t test_vror_vx_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u32m4_tu(vd, vs2, rs1, vl); +} + +vuint32m8_t test_vror_vv_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) { + return __riscv_vror_vv_u32m8_tu(vd, vs2, vs1, vl); +} + +vuint32m8_t test_vror_vx_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u32m8_tu(vd, vs2, rs1, vl); +} + +vuint64m1_t test_vror_vv_u64m1_tu(vuint64m1_t vd, vuint64m1_t 
vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vror_vv_u64m1_tu(vd, vs2, vs1, vl); +} + +vuint64m1_t test_vror_vx_u64m1_tu(vuint64m1_t vd, vuint64m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u64m1_tu(vd, vs2, rs1, vl); +} + +vuint64m2_t test_vror_vv_u64m2_tu(vuint64m2_t vd, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { + return __riscv_vror_vv_u64m2_tu(vd, vs2, vs1, vl); +} + +vuint64m2_t test_vror_vx_u64m2_tu(vuint64m2_t vd, vuint64m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u64m2_tu(vd, vs2, rs1, vl); +} + +vuint64m4_t test_vror_vv_u64m4_tu(vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { + return __riscv_vror_vv_u64m4_tu(vd, vs2, vs1, vl); +} + +vuint64m4_t test_vror_vx_u64m4_tu(vuint64m4_t vd, vuint64m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u64m4_tu(vd, vs2, rs1, vl); +} + +vuint64m8_t test_vror_vv_u64m8_tu(vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { + return __riscv_vror_vv_u64m8_tu(vd, vs2, vs1, vl); +} + +vuint64m8_t test_vror_vx_u64m8_tu(vuint64m8_t vd, vuint64m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u64m8_tu(vd, vs2, rs1, vl); +} + +vuint8mf8_t test_vror_vv_u8mf8_tum(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { + return __riscv_vror_vv_u8mf8_tum(vm, vd, vs2, vs1, vl); +} + +vuint8mf8_t test_vror_vx_u8mf8_tum(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u8mf8_tum(vm, vd, vs2, rs1, vl); +} + +vuint8mf4_t test_vror_vv_u8mf4_tum(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { + return __riscv_vror_vv_u8mf4_tum(vm, vd, vs2, vs1, vl); +} + +vuint8mf4_t test_vror_vx_u8mf4_tum(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u8mf4_tum(vm, vd, vs2, rs1, vl); +} + +vuint8mf2_t test_vror_vv_u8mf2_tum(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { + return __riscv_vror_vv_u8mf2_tum(vm, vd, vs2, vs1, vl); +} + +vuint8mf2_t test_vror_vx_u8mf2_tum(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u8mf2_tum(vm, vd, vs2, rs1, vl); +} + +vuint8m1_t test_vror_vv_u8m1_tum(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vror_vv_u8m1_tum(vm, vd, vs2, vs1, vl); +} + +vuint8m1_t test_vror_vx_u8m1_tum(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u8m1_tum(vm, vd, vs2, rs1, vl); +} + +vuint8m2_t test_vror_vv_u8m2_tum(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) { + return __riscv_vror_vv_u8m2_tum(vm, vd, vs2, vs1, vl); +} + +vuint8m2_t test_vror_vx_u8m2_tum(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u8m2_tum(vm, vd, vs2, rs1, vl); +} + +vuint8m4_t test_vror_vv_u8m4_tum(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) { + return __riscv_vror_vv_u8m4_tum(vm, vd, vs2, vs1, vl); +} + +vuint8m4_t test_vror_vx_u8m4_tum(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u8m4_tum(vm, vd, vs2, rs1, vl); +} + +vuint8m8_t test_vror_vv_u8m8_tum(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) { + return __riscv_vror_vv_u8m8_tum(vm, vd, vs2, vs1, vl); +} + +vuint8m8_t test_vror_vx_u8m8_tum(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u8m8_tum(vm, vd, vs2, rs1, vl); +} + 
+vuint16mf4_t test_vror_vv_u16mf4_tum(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { + return __riscv_vror_vv_u16mf4_tum(vm, vd, vs2, vs1, vl); +} + +vuint16mf4_t test_vror_vx_u16mf4_tum(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u16mf4_tum(vm, vd, vs2, rs1, vl); +} + +vuint16mf2_t test_vror_vv_u16mf2_tum(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { + return __riscv_vror_vv_u16mf2_tum(vm, vd, vs2, vs1, vl); +} + +vuint16mf2_t test_vror_vx_u16mf2_tum(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u16mf2_tum(vm, vd, vs2, rs1, vl); +} + +vuint16m1_t test_vror_vv_u16m1_tum(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vror_vv_u16m1_tum(vm, vd, vs2, vs1, vl); +} + +vuint16m1_t test_vror_vx_u16m1_tum(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u16m1_tum(vm, vd, vs2, rs1, vl); +} + +vuint16m2_t test_vror_vv_u16m2_tum(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { + return __riscv_vror_vv_u16m2_tum(vm, vd, vs2, vs1, vl); +} + +vuint16m2_t test_vror_vx_u16m2_tum(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u16m2_tum(vm, vd, vs2, rs1, vl); +} + +vuint16m4_t test_vror_vv_u16m4_tum(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { + return __riscv_vror_vv_u16m4_tum(vm, vd, vs2, vs1, vl); +} + +vuint16m4_t test_vror_vx_u16m4_tum(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u16m4_tum(vm, vd, vs2, rs1, vl); +} + +vuint16m8_t test_vror_vv_u16m8_tum(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) { + return __riscv_vror_vv_u16m8_tum(vm, vd, vs2, vs1, vl); +} + +vuint16m8_t test_vror_vx_u16m8_tum(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u16m8_tum(vm, vd, vs2, rs1, vl); +} + +vuint32mf2_t test_vror_vv_u32mf2_tum(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { + return __riscv_vror_vv_u32mf2_tum(vm, vd, vs2, vs1, vl); +} + +vuint32mf2_t test_vror_vx_u32mf2_tum(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u32mf2_tum(vm, vd, vs2, rs1, vl); +} + +vuint32m1_t test_vror_vv_u32m1_tum(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vror_vv_u32m1_tum(vm, vd, vs2, vs1, vl); +} + +vuint32m1_t test_vror_vx_u32m1_tum(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u32m1_tum(vm, vd, vs2, rs1, vl); +} + +vuint32m2_t test_vror_vv_u32m2_tum(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { + return __riscv_vror_vv_u32m2_tum(vm, vd, vs2, vs1, vl); +} + +vuint32m2_t test_vror_vx_u32m2_tum(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u32m2_tum(vm, vd, vs2, rs1, vl); +} + +vuint32m4_t test_vror_vv_u32m4_tum(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { + return __riscv_vror_vv_u32m4_tum(vm, vd, vs2, vs1, vl); +} + +vuint32m4_t test_vror_vx_u32m4_tum(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u32m4_tum(vm, vd, vs2, rs1, vl); +} + +vuint32m8_t test_vror_vv_u32m8_tum(vbool4_t vm, 
vuint32m8_t vd, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) { + return __riscv_vror_vv_u32m8_tum(vm, vd, vs2, vs1, vl); +} + +vuint32m8_t test_vror_vx_u32m8_tum(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u32m8_tum(vm, vd, vs2, rs1, vl); +} + +vuint64m1_t test_vror_vv_u64m1_tum(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vror_vv_u64m1_tum(vm, vd, vs2, vs1, vl); +} + +vuint64m1_t test_vror_vx_u64m1_tum(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u64m1_tum(vm, vd, vs2, rs1, vl); +} + +vuint64m2_t test_vror_vv_u64m2_tum(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { + return __riscv_vror_vv_u64m2_tum(vm, vd, vs2, vs1, vl); +} + +vuint64m2_t test_vror_vx_u64m2_tum(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u64m2_tum(vm, vd, vs2, rs1, vl); +} + +vuint64m4_t test_vror_vv_u64m4_tum(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { + return __riscv_vror_vv_u64m4_tum(vm, vd, vs2, vs1, vl); +} + +vuint64m4_t test_vror_vx_u64m4_tum(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u64m4_tum(vm, vd, vs2, rs1, vl); +} + +vuint64m8_t test_vror_vv_u64m8_tum(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { + return __riscv_vror_vv_u64m8_tum(vm, vd, vs2, vs1, vl); +} + +vuint64m8_t test_vror_vx_u64m8_tum(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u64m8_tum(vm, vd, vs2, rs1, vl); +} + +vuint8mf8_t test_vror_vv_u8mf8_tumu(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { + return __riscv_vror_vv_u8mf8_tumu(vm, vd, vs2, vs1, vl); +} + +vuint8mf8_t test_vror_vx_u8mf8_tumu(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u8mf8_tumu(vm, vd, vs2, rs1, vl); +} + +vuint8mf4_t test_vror_vv_u8mf4_tumu(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { + return __riscv_vror_vv_u8mf4_tumu(vm, vd, vs2, vs1, vl); +} + +vuint8mf4_t test_vror_vx_u8mf4_tumu(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u8mf4_tumu(vm, vd, vs2, rs1, vl); +} + +vuint8mf2_t test_vror_vv_u8mf2_tumu(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { + return __riscv_vror_vv_u8mf2_tumu(vm, vd, vs2, vs1, vl); +} + +vuint8mf2_t test_vror_vx_u8mf2_tumu(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u8mf2_tumu(vm, vd, vs2, rs1, vl); +} + +vuint8m1_t test_vror_vv_u8m1_tumu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vror_vv_u8m1_tumu(vm, vd, vs2, vs1, vl); +} + +vuint8m1_t test_vror_vx_u8m1_tumu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u8m1_tumu(vm, vd, vs2, rs1, vl); +} + +vuint8m2_t test_vror_vv_u8m2_tumu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) { + return __riscv_vror_vv_u8m2_tumu(vm, vd, vs2, vs1, vl); +} + +vuint8m2_t test_vror_vx_u8m2_tumu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u8m2_tumu(vm, vd, vs2, rs1, vl); +} + +vuint8m4_t test_vror_vv_u8m4_tumu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) { + return 
__riscv_vror_vv_u8m4_tumu(vm, vd, vs2, vs1, vl); +} + +vuint8m4_t test_vror_vx_u8m4_tumu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u8m4_tumu(vm, vd, vs2, rs1, vl); +} + +vuint8m8_t test_vror_vv_u8m8_tumu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) { + return __riscv_vror_vv_u8m8_tumu(vm, vd, vs2, vs1, vl); +} + +vuint8m8_t test_vror_vx_u8m8_tumu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u8m8_tumu(vm, vd, vs2, rs1, vl); +} + +vuint16mf4_t test_vror_vv_u16mf4_tumu(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { + return __riscv_vror_vv_u16mf4_tumu(vm, vd, vs2, vs1, vl); +} + +vuint16mf4_t test_vror_vx_u16mf4_tumu(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u16mf4_tumu(vm, vd, vs2, rs1, vl); +} + +vuint16mf2_t test_vror_vv_u16mf2_tumu(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { + return __riscv_vror_vv_u16mf2_tumu(vm, vd, vs2, vs1, vl); +} + +vuint16mf2_t test_vror_vx_u16mf2_tumu(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u16mf2_tumu(vm, vd, vs2, rs1, vl); +} + +vuint16m1_t test_vror_vv_u16m1_tumu(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vror_vv_u16m1_tumu(vm, vd, vs2, vs1, vl); +} + +vuint16m1_t test_vror_vx_u16m1_tumu(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u16m1_tumu(vm, vd, vs2, rs1, vl); +} + +vuint16m2_t test_vror_vv_u16m2_tumu(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { + return __riscv_vror_vv_u16m2_tumu(vm, vd, vs2, vs1, vl); +} + +vuint16m2_t test_vror_vx_u16m2_tumu(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u16m2_tumu(vm, vd, vs2, rs1, vl); +} + +vuint16m4_t test_vror_vv_u16m4_tumu(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { + return __riscv_vror_vv_u16m4_tumu(vm, vd, vs2, vs1, vl); +} + +vuint16m4_t test_vror_vx_u16m4_tumu(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u16m4_tumu(vm, vd, vs2, rs1, vl); +} + +vuint16m8_t test_vror_vv_u16m8_tumu(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) { + return __riscv_vror_vv_u16m8_tumu(vm, vd, vs2, vs1, vl); +} + +vuint16m8_t test_vror_vx_u16m8_tumu(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u16m8_tumu(vm, vd, vs2, rs1, vl); +} + +vuint32mf2_t test_vror_vv_u32mf2_tumu(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { + return __riscv_vror_vv_u32mf2_tumu(vm, vd, vs2, vs1, vl); +} + +vuint32mf2_t test_vror_vx_u32mf2_tumu(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u32mf2_tumu(vm, vd, vs2, rs1, vl); +} + +vuint32m1_t test_vror_vv_u32m1_tumu(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vror_vv_u32m1_tumu(vm, vd, vs2, vs1, vl); +} + +vuint32m1_t test_vror_vx_u32m1_tumu(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u32m1_tumu(vm, vd, vs2, rs1, vl); +} + +vuint32m2_t test_vror_vv_u32m2_tumu(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { + return __riscv_vror_vv_u32m2_tumu(vm, vd, 
vs2, vs1, vl); +} + +vuint32m2_t test_vror_vx_u32m2_tumu(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u32m2_tumu(vm, vd, vs2, rs1, vl); +} + +vuint32m4_t test_vror_vv_u32m4_tumu(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { + return __riscv_vror_vv_u32m4_tumu(vm, vd, vs2, vs1, vl); +} + +vuint32m4_t test_vror_vx_u32m4_tumu(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u32m4_tumu(vm, vd, vs2, rs1, vl); +} + +vuint32m8_t test_vror_vv_u32m8_tumu(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) { + return __riscv_vror_vv_u32m8_tumu(vm, vd, vs2, vs1, vl); +} + +vuint32m8_t test_vror_vx_u32m8_tumu(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u32m8_tumu(vm, vd, vs2, rs1, vl); +} + +vuint64m1_t test_vror_vv_u64m1_tumu(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vror_vv_u64m1_tumu(vm, vd, vs2, vs1, vl); +} + +vuint64m1_t test_vror_vx_u64m1_tumu(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u64m1_tumu(vm, vd, vs2, rs1, vl); +} + +vuint64m2_t test_vror_vv_u64m2_tumu(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { + return __riscv_vror_vv_u64m2_tumu(vm, vd, vs2, vs1, vl); +} + +vuint64m2_t test_vror_vx_u64m2_tumu(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u64m2_tumu(vm, vd, vs2, rs1, vl); +} + +vuint64m4_t test_vror_vv_u64m4_tumu(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { + return __riscv_vror_vv_u64m4_tumu(vm, vd, vs2, vs1, vl); +} + +vuint64m4_t test_vror_vx_u64m4_tumu(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u64m4_tumu(vm, vd, vs2, rs1, vl); +} + +vuint64m8_t test_vror_vv_u64m8_tumu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { + return __riscv_vror_vv_u64m8_tumu(vm, vd, vs2, vs1, vl); +} + +vuint64m8_t test_vror_vx_u64m8_tumu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u64m8_tumu(vm, vd, vs2, rs1, vl); +} + +vuint8mf8_t test_vror_vv_u8mf8_mu(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { + return __riscv_vror_vv_u8mf8_mu(vm, vd, vs2, vs1, vl); +} + +vuint8mf8_t test_vror_vx_u8mf8_mu(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u8mf8_mu(vm, vd, vs2, rs1, vl); +} + +vuint8mf4_t test_vror_vv_u8mf4_mu(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { + return __riscv_vror_vv_u8mf4_mu(vm, vd, vs2, vs1, vl); +} + +vuint8mf4_t test_vror_vx_u8mf4_mu(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u8mf4_mu(vm, vd, vs2, rs1, vl); +} + +vuint8mf2_t test_vror_vv_u8mf2_mu(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { + return __riscv_vror_vv_u8mf2_mu(vm, vd, vs2, vs1, vl); +} + +vuint8mf2_t test_vror_vx_u8mf2_mu(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u8mf2_mu(vm, vd, vs2, rs1, vl); +} + +vuint8m1_t test_vror_vv_u8m1_mu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vror_vv_u8m1_mu(vm, vd, vs2, vs1, vl); +} + +vuint8m1_t test_vror_vx_u8m1_mu(vbool8_t vm, vuint8m1_t vd, 
vuint8m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u8m1_mu(vm, vd, vs2, rs1, vl); +} + +vuint8m2_t test_vror_vv_u8m2_mu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) { + return __riscv_vror_vv_u8m2_mu(vm, vd, vs2, vs1, vl); +} + +vuint8m2_t test_vror_vx_u8m2_mu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u8m2_mu(vm, vd, vs2, rs1, vl); +} + +vuint8m4_t test_vror_vv_u8m4_mu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) { + return __riscv_vror_vv_u8m4_mu(vm, vd, vs2, vs1, vl); +} + +vuint8m4_t test_vror_vx_u8m4_mu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u8m4_mu(vm, vd, vs2, rs1, vl); +} + +vuint8m8_t test_vror_vv_u8m8_mu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) { + return __riscv_vror_vv_u8m8_mu(vm, vd, vs2, vs1, vl); +} + +vuint8m8_t test_vror_vx_u8m8_mu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u8m8_mu(vm, vd, vs2, rs1, vl); +} + +vuint16mf4_t test_vror_vv_u16mf4_mu(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { + return __riscv_vror_vv_u16mf4_mu(vm, vd, vs2, vs1, vl); +} + +vuint16mf4_t test_vror_vx_u16mf4_mu(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u16mf4_mu(vm, vd, vs2, rs1, vl); +} + +vuint16mf2_t test_vror_vv_u16mf2_mu(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { + return __riscv_vror_vv_u16mf2_mu(vm, vd, vs2, vs1, vl); +} + +vuint16mf2_t test_vror_vx_u16mf2_mu(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u16mf2_mu(vm, vd, vs2, rs1, vl); +} + +vuint16m1_t test_vror_vv_u16m1_mu(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vror_vv_u16m1_mu(vm, vd, vs2, vs1, vl); +} + +vuint16m1_t test_vror_vx_u16m1_mu(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u16m1_mu(vm, vd, vs2, rs1, vl); +} + +vuint16m2_t test_vror_vv_u16m2_mu(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { + return __riscv_vror_vv_u16m2_mu(vm, vd, vs2, vs1, vl); +} + +vuint16m2_t test_vror_vx_u16m2_mu(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u16m2_mu(vm, vd, vs2, rs1, vl); +} + +vuint16m4_t test_vror_vv_u16m4_mu(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { + return __riscv_vror_vv_u16m4_mu(vm, vd, vs2, vs1, vl); +} + +vuint16m4_t test_vror_vx_u16m4_mu(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u16m4_mu(vm, vd, vs2, rs1, vl); +} + +vuint16m8_t test_vror_vv_u16m8_mu(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) { + return __riscv_vror_vv_u16m8_mu(vm, vd, vs2, vs1, vl); +} + +vuint16m8_t test_vror_vx_u16m8_mu(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u16m8_mu(vm, vd, vs2, rs1, vl); +} + +vuint32mf2_t test_vror_vv_u32mf2_mu(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { + return __riscv_vror_vv_u32mf2_mu(vm, vd, vs2, vs1, vl); +} + +vuint32mf2_t test_vror_vx_u32mf2_mu(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u32mf2_mu(vm, vd, vs2, rs1, vl); +} + +vuint32m1_t 
test_vror_vv_u32m1_mu(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vror_vv_u32m1_mu(vm, vd, vs2, vs1, vl); +} + +vuint32m1_t test_vror_vx_u32m1_mu(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u32m1_mu(vm, vd, vs2, rs1, vl); +} + +vuint32m2_t test_vror_vv_u32m2_mu(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { + return __riscv_vror_vv_u32m2_mu(vm, vd, vs2, vs1, vl); +} + +vuint32m2_t test_vror_vx_u32m2_mu(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u32m2_mu(vm, vd, vs2, rs1, vl); +} + +vuint32m4_t test_vror_vv_u32m4_mu(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { + return __riscv_vror_vv_u32m4_mu(vm, vd, vs2, vs1, vl); +} + +vuint32m4_t test_vror_vx_u32m4_mu(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u32m4_mu(vm, vd, vs2, rs1, vl); +} + +vuint32m8_t test_vror_vv_u32m8_mu(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) { + return __riscv_vror_vv_u32m8_mu(vm, vd, vs2, vs1, vl); +} + +vuint32m8_t test_vror_vx_u32m8_mu(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u32m8_mu(vm, vd, vs2, rs1, vl); +} + +vuint64m1_t test_vror_vv_u64m1_mu(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vror_vv_u64m1_mu(vm, vd, vs2, vs1, vl); +} + +vuint64m1_t test_vror_vx_u64m1_mu(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u64m1_mu(vm, vd, vs2, rs1, vl); +} + +vuint64m2_t test_vror_vv_u64m2_mu(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { + return __riscv_vror_vv_u64m2_mu(vm, vd, vs2, vs1, vl); +} + +vuint64m2_t test_vror_vx_u64m2_mu(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u64m2_mu(vm, vd, vs2, rs1, vl); +} + +vuint64m4_t test_vror_vv_u64m4_mu(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { + return __riscv_vror_vv_u64m4_mu(vm, vd, vs2, vs1, vl); +} + +vuint64m4_t test_vror_vx_u64m4_mu(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u64m4_mu(vm, vd, vs2, rs1, vl); +} + +vuint64m8_t test_vror_vv_u64m8_mu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { + return __riscv_vror_vv_u64m8_mu(vm, vd, vs2, vs1, vl); +} + +vuint64m8_t test_vror_vx_u64m8_mu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_vx_u64m8_mu(vm, vd, vs2, rs1, vl); +} diff --git a/auto-generated/vector-crypto/policy_funcs/llvm-api-tests/vsha2ch.c b/auto-generated/vector-crypto/policy_funcs/llvm-api-tests/vsha2ch.c new file mode 100644 index 000000000..97c413c75 --- /dev/null +++ b/auto-generated/vector-crypto/policy_funcs/llvm-api-tests/vsha2ch.c @@ -0,0 +1,49 @@ +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zvl512b \ +// RUN: -target-feature +zvbb \ +// RUN: -target-feature +zvbc \ +// RUN: -target-feature +zvkg \ +// RUN: -target-feature +zvkned \ +// RUN: -target-feature +zvknhb \ +// RUN: -target-feature +zvksed \ +// RUN: -target-feature +zvksh -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include + +vuint32mf2_t 
test_vsha2ch_vv_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { + return __riscv_vsha2ch_vv_u32mf2_tu(vd, vs2, vs1, vl); +} + +vuint32m1_t test_vsha2ch_vv_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vsha2ch_vv_u32m1_tu(vd, vs2, vs1, vl); +} + +vuint32m2_t test_vsha2ch_vv_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { + return __riscv_vsha2ch_vv_u32m2_tu(vd, vs2, vs1, vl); +} + +vuint32m4_t test_vsha2ch_vv_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { + return __riscv_vsha2ch_vv_u32m4_tu(vd, vs2, vs1, vl); +} + +vuint32m8_t test_vsha2ch_vv_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) { + return __riscv_vsha2ch_vv_u32m8_tu(vd, vs2, vs1, vl); +} + +vuint64m1_t test_vsha2ch_vv_u64m1_tu(vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vsha2ch_vv_u64m1_tu(vd, vs2, vs1, vl); +} + +vuint64m2_t test_vsha2ch_vv_u64m2_tu(vuint64m2_t vd, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { + return __riscv_vsha2ch_vv_u64m2_tu(vd, vs2, vs1, vl); +} + +vuint64m4_t test_vsha2ch_vv_u64m4_tu(vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { + return __riscv_vsha2ch_vv_u64m4_tu(vd, vs2, vs1, vl); +} + +vuint64m8_t test_vsha2ch_vv_u64m8_tu(vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { + return __riscv_vsha2ch_vv_u64m8_tu(vd, vs2, vs1, vl); +} diff --git a/auto-generated/vector-crypto/policy_funcs/llvm-api-tests/vsha2cl.c b/auto-generated/vector-crypto/policy_funcs/llvm-api-tests/vsha2cl.c new file mode 100644 index 000000000..8f43c4416 --- /dev/null +++ b/auto-generated/vector-crypto/policy_funcs/llvm-api-tests/vsha2cl.c @@ -0,0 +1,49 @@ +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zvl512b \ +// RUN: -target-feature +zvbb \ +// RUN: -target-feature +zvbc \ +// RUN: -target-feature +zvkg \ +// RUN: -target-feature +zvkned \ +// RUN: -target-feature +zvknhb \ +// RUN: -target-feature +zvksed \ +// RUN: -target-feature +zvksh -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include + +vuint32mf2_t test_vsha2cl_vv_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { + return __riscv_vsha2cl_vv_u32mf2_tu(vd, vs2, vs1, vl); +} + +vuint32m1_t test_vsha2cl_vv_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vsha2cl_vv_u32m1_tu(vd, vs2, vs1, vl); +} + +vuint32m2_t test_vsha2cl_vv_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { + return __riscv_vsha2cl_vv_u32m2_tu(vd, vs2, vs1, vl); +} + +vuint32m4_t test_vsha2cl_vv_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { + return __riscv_vsha2cl_vv_u32m4_tu(vd, vs2, vs1, vl); +} + +vuint32m8_t test_vsha2cl_vv_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) { + return __riscv_vsha2cl_vv_u32m8_tu(vd, vs2, vs1, vl); +} + +vuint64m1_t test_vsha2cl_vv_u64m1_tu(vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vsha2cl_vv_u64m1_tu(vd, vs2, vs1, vl); +} + +vuint64m2_t test_vsha2cl_vv_u64m2_tu(vuint64m2_t vd, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { + return __riscv_vsha2cl_vv_u64m2_tu(vd, vs2, vs1, vl); +} + +vuint64m4_t test_vsha2cl_vv_u64m4_tu(vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { + return __riscv_vsha2cl_vv_u64m4_tu(vd, vs2, vs1, vl); +} + 
+vuint64m8_t test_vsha2cl_vv_u64m8_tu(vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { + return __riscv_vsha2cl_vv_u64m8_tu(vd, vs2, vs1, vl); +} diff --git a/auto-generated/vector-crypto/policy_funcs/llvm-api-tests/vsha2ms.c b/auto-generated/vector-crypto/policy_funcs/llvm-api-tests/vsha2ms.c new file mode 100644 index 000000000..bb48799a5 --- /dev/null +++ b/auto-generated/vector-crypto/policy_funcs/llvm-api-tests/vsha2ms.c @@ -0,0 +1,49 @@ +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zvl512b \ +// RUN: -target-feature +zvbb \ +// RUN: -target-feature +zvbc \ +// RUN: -target-feature +zvkg \ +// RUN: -target-feature +zvkned \ +// RUN: -target-feature +zvknhb \ +// RUN: -target-feature +zvksed \ +// RUN: -target-feature +zvksh -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include + +vuint32mf2_t test_vsha2ms_vv_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { + return __riscv_vsha2ms_vv_u32mf2_tu(vd, vs2, vs1, vl); +} + +vuint32m1_t test_vsha2ms_vv_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vsha2ms_vv_u32m1_tu(vd, vs2, vs1, vl); +} + +vuint32m2_t test_vsha2ms_vv_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { + return __riscv_vsha2ms_vv_u32m2_tu(vd, vs2, vs1, vl); +} + +vuint32m4_t test_vsha2ms_vv_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { + return __riscv_vsha2ms_vv_u32m4_tu(vd, vs2, vs1, vl); +} + +vuint32m8_t test_vsha2ms_vv_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) { + return __riscv_vsha2ms_vv_u32m8_tu(vd, vs2, vs1, vl); +} + +vuint64m1_t test_vsha2ms_vv_u64m1_tu(vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) { + return __riscv_vsha2ms_vv_u64m1_tu(vd, vs2, vs1, vl); +} + +vuint64m2_t test_vsha2ms_vv_u64m2_tu(vuint64m2_t vd, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) { + return __riscv_vsha2ms_vv_u64m2_tu(vd, vs2, vs1, vl); +} + +vuint64m4_t test_vsha2ms_vv_u64m4_tu(vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) { + return __riscv_vsha2ms_vv_u64m4_tu(vd, vs2, vs1, vl); +} + +vuint64m8_t test_vsha2ms_vv_u64m8_tu(vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) { + return __riscv_vsha2ms_vv_u64m8_tu(vd, vs2, vs1, vl); +} diff --git a/auto-generated/vector-crypto/policy_funcs/llvm-api-tests/vsm3c.c b/auto-generated/vector-crypto/policy_funcs/llvm-api-tests/vsm3c.c new file mode 100644 index 000000000..ccf8caa8b --- /dev/null +++ b/auto-generated/vector-crypto/policy_funcs/llvm-api-tests/vsm3c.c @@ -0,0 +1,33 @@ +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zvl512b \ +// RUN: -target-feature +zvbb \ +// RUN: -target-feature +zvbc \ +// RUN: -target-feature +zvkg \ +// RUN: -target-feature +zvkned \ +// RUN: -target-feature +zvknhb \ +// RUN: -target-feature +zvksed \ +// RUN: -target-feature +zvksh -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include + +vuint32mf2_t test_vsm3c_vi_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) { + return __riscv_vsm3c_vi_u32mf2_tu(vd, vs2, 0, vl); +} + +vuint32m1_t test_vsm3c_vi_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vsm3c_vi_u32m1_tu(vd, vs2, 0, vl); +} + +vuint32m2_t test_vsm3c_vi_u32m2_tu(vuint32m2_t vd, 
vuint32m2_t vs2, size_t vl) { + return __riscv_vsm3c_vi_u32m2_tu(vd, vs2, 0, vl); +} + +vuint32m4_t test_vsm3c_vi_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vsm3c_vi_u32m4_tu(vd, vs2, 0, vl); +} + +vuint32m8_t test_vsm3c_vi_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { + return __riscv_vsm3c_vi_u32m8_tu(vd, vs2, 0, vl); +} diff --git a/auto-generated/vector-crypto/policy_funcs/llvm-api-tests/vsm3me.c b/auto-generated/vector-crypto/policy_funcs/llvm-api-tests/vsm3me.c new file mode 100644 index 000000000..3ebf605aa --- /dev/null +++ b/auto-generated/vector-crypto/policy_funcs/llvm-api-tests/vsm3me.c @@ -0,0 +1,33 @@ +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zvl512b \ +// RUN: -target-feature +zvbb \ +// RUN: -target-feature +zvbc \ +// RUN: -target-feature +zvkg \ +// RUN: -target-feature +zvkned \ +// RUN: -target-feature +zvknhb \ +// RUN: -target-feature +zvksed \ +// RUN: -target-feature +zvksh -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include + +vuint32mf2_t test_vsm3me_vv_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { + return __riscv_vsm3me_vv_u32mf2_tu(vd, vs2, vs1, vl); +} + +vuint32m1_t test_vsm3me_vv_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vsm3me_vv_u32m1_tu(vd, vs2, vs1, vl); +} + +vuint32m2_t test_vsm3me_vv_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { + return __riscv_vsm3me_vv_u32m2_tu(vd, vs2, vs1, vl); +} + +vuint32m4_t test_vsm3me_vv_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { + return __riscv_vsm3me_vv_u32m4_tu(vd, vs2, vs1, vl); +} + +vuint32m8_t test_vsm3me_vv_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) { + return __riscv_vsm3me_vv_u32m8_tu(vd, vs2, vs1, vl); +} diff --git a/auto-generated/vector-crypto/policy_funcs/llvm-api-tests/vsm4k.c b/auto-generated/vector-crypto/policy_funcs/llvm-api-tests/vsm4k.c new file mode 100644 index 000000000..8f353c311 --- /dev/null +++ b/auto-generated/vector-crypto/policy_funcs/llvm-api-tests/vsm4k.c @@ -0,0 +1,33 @@ +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zvl512b \ +// RUN: -target-feature +zvbb \ +// RUN: -target-feature +zvbc \ +// RUN: -target-feature +zvkg \ +// RUN: -target-feature +zvkned \ +// RUN: -target-feature +zvknhb \ +// RUN: -target-feature +zvksed \ +// RUN: -target-feature +zvksh -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include + +vuint32mf2_t test_vsm4k_vi_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) { + return __riscv_vsm4k_vi_u32mf2_tu(vd, vs2, 0, vl); +} + +vuint32m1_t test_vsm4k_vi_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vsm4k_vi_u32m1_tu(vd, vs2, 0, vl); +} + +vuint32m2_t test_vsm4k_vi_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vsm4k_vi_u32m2_tu(vd, vs2, 0, vl); +} + +vuint32m4_t test_vsm4k_vi_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vsm4k_vi_u32m4_tu(vd, vs2, 0, vl); +} + +vuint32m8_t test_vsm4k_vi_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { + return __riscv_vsm4k_vi_u32m8_tu(vd, vs2, 0, vl); +} diff --git a/auto-generated/vector-crypto/policy_funcs/llvm-api-tests/vsm4r.c 
b/auto-generated/vector-crypto/policy_funcs/llvm-api-tests/vsm4r.c new file mode 100644 index 000000000..06f9b3ffc --- /dev/null +++ b/auto-generated/vector-crypto/policy_funcs/llvm-api-tests/vsm4r.c @@ -0,0 +1,89 @@ +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zvl512b \ +// RUN: -target-feature +zvbb \ +// RUN: -target-feature +zvbc \ +// RUN: -target-feature +zvkg \ +// RUN: -target-feature +zvkned \ +// RUN: -target-feature +zvknhb \ +// RUN: -target-feature +zvksed \ +// RUN: -target-feature +zvksh -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include + +vuint32mf2_t test_vsm4r_vv_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) { + return __riscv_vsm4r_vv_u32mf2_tu(vd, vs2, vl); +} + +vuint32mf2_t test_vsm4r_vs_u32mf2_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) { + return __riscv_vsm4r_vs_u32mf2_u32mf2_tu(vd, vs2, vl); +} + +vuint32m1_t test_vsm4r_vs_u32mf2_u32m1_tu(vuint32m1_t vd, vuint32mf2_t vs2, size_t vl) { + return __riscv_vsm4r_vs_u32mf2_u32m1_tu(vd, vs2, vl); +} + +vuint32m2_t test_vsm4r_vs_u32mf2_u32m2_tu(vuint32m2_t vd, vuint32mf2_t vs2, size_t vl) { + return __riscv_vsm4r_vs_u32mf2_u32m2_tu(vd, vs2, vl); +} + +vuint32m4_t test_vsm4r_vs_u32mf2_u32m4_tu(vuint32m4_t vd, vuint32mf2_t vs2, size_t vl) { + return __riscv_vsm4r_vs_u32mf2_u32m4_tu(vd, vs2, vl); +} + +vuint32m8_t test_vsm4r_vs_u32mf2_u32m8_tu(vuint32m8_t vd, vuint32mf2_t vs2, size_t vl) { + return __riscv_vsm4r_vs_u32mf2_u32m8_tu(vd, vs2, vl); +} + +vuint32m1_t test_vsm4r_vv_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vsm4r_vv_u32m1_tu(vd, vs2, vl); +} + +vuint32m1_t test_vsm4r_vs_u32m1_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vsm4r_vs_u32m1_u32m1_tu(vd, vs2, vl); +} + +vuint32m2_t test_vsm4r_vs_u32m1_u32m2_tu(vuint32m2_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vsm4r_vs_u32m1_u32m2_tu(vd, vs2, vl); +} + +vuint32m4_t test_vsm4r_vs_u32m1_u32m4_tu(vuint32m4_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vsm4r_vs_u32m1_u32m4_tu(vd, vs2, vl); +} + +vuint32m8_t test_vsm4r_vs_u32m1_u32m8_tu(vuint32m8_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vsm4r_vs_u32m1_u32m8_tu(vd, vs2, vl); +} + +vuint32m2_t test_vsm4r_vv_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vsm4r_vv_u32m2_tu(vd, vs2, vl); +} + +vuint32m2_t test_vsm4r_vs_u32m2_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vsm4r_vs_u32m2_u32m2_tu(vd, vs2, vl); +} + +vuint32m4_t test_vsm4r_vs_u32m2_u32m4_tu(vuint32m4_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vsm4r_vs_u32m2_u32m4_tu(vd, vs2, vl); +} + +vuint32m8_t test_vsm4r_vs_u32m2_u32m8_tu(vuint32m8_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vsm4r_vs_u32m2_u32m8_tu(vd, vs2, vl); +} + +vuint32m4_t test_vsm4r_vv_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vsm4r_vv_u32m4_tu(vd, vs2, vl); +} + +vuint32m4_t test_vsm4r_vs_u32m4_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vsm4r_vs_u32m4_u32m4_tu(vd, vs2, vl); +} + +vuint32m8_t test_vsm4r_vs_u32m4_u32m8_tu(vuint32m8_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vsm4r_vs_u32m4_u32m8_tu(vd, vs2, vl); +} + +vuint32m8_t test_vsm4r_vv_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { + return __riscv_vsm4r_vv_u32m8_tu(vd, vs2, vl); +} diff --git 
a/auto-generated/vector-crypto/policy_funcs/llvm-api-tests/vwsll.c b/auto-generated/vector-crypto/policy_funcs/llvm-api-tests/vwsll.c new file mode 100644 index 000000000..63da91ed1 --- /dev/null +++ b/auto-generated/vector-crypto/policy_funcs/llvm-api-tests/vwsll.c @@ -0,0 +1,493 @@ +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zvl512b \ +// RUN: -target-feature +zvbb \ +// RUN: -target-feature +zvbc \ +// RUN: -target-feature +zvkg \ +// RUN: -target-feature +zvkned \ +// RUN: -target-feature +zvknhb \ +// RUN: -target-feature +zvksed \ +// RUN: -target-feature +zvksh -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include + +vuint16mf4_t test_vwsll_vv_u16mf4_tu(vuint16mf4_t vd, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { + return __riscv_vwsll_vv_u16mf4_tu(vd, vs2, vs1, vl); +} + +vuint16mf4_t test_vwsll_vx_u16mf4_tu(vuint16mf4_t vd, vuint8mf8_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_vx_u16mf4_tu(vd, vs2, rs1, vl); +} + +vuint16mf2_t test_vwsll_vv_u16mf2_tu(vuint16mf2_t vd, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { + return __riscv_vwsll_vv_u16mf2_tu(vd, vs2, vs1, vl); +} + +vuint16mf2_t test_vwsll_vx_u16mf2_tu(vuint16mf2_t vd, vuint8mf4_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_vx_u16mf2_tu(vd, vs2, rs1, vl); +} + +vuint16m1_t test_vwsll_vv_u16m1_tu(vuint16m1_t vd, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { + return __riscv_vwsll_vv_u16m1_tu(vd, vs2, vs1, vl); +} + +vuint16m1_t test_vwsll_vx_u16m1_tu(vuint16m1_t vd, vuint8mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_vx_u16m1_tu(vd, vs2, rs1, vl); +} + +vuint16m2_t test_vwsll_vv_u16m2_tu(vuint16m2_t vd, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vwsll_vv_u16m2_tu(vd, vs2, vs1, vl); +} + +vuint16m2_t test_vwsll_vx_u16m2_tu(vuint16m2_t vd, vuint8m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_vx_u16m2_tu(vd, vs2, rs1, vl); +} + +vuint16m4_t test_vwsll_vv_u16m4_tu(vuint16m4_t vd, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) { + return __riscv_vwsll_vv_u16m4_tu(vd, vs2, vs1, vl); +} + +vuint16m4_t test_vwsll_vx_u16m4_tu(vuint16m4_t vd, vuint8m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_vx_u16m4_tu(vd, vs2, rs1, vl); +} + +vuint16m8_t test_vwsll_vv_u16m8_tu(vuint16m8_t vd, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) { + return __riscv_vwsll_vv_u16m8_tu(vd, vs2, vs1, vl); +} + +vuint16m8_t test_vwsll_vx_u16m8_tu(vuint16m8_t vd, vuint8m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_vx_u16m8_tu(vd, vs2, rs1, vl); +} + +vuint32mf2_t test_vwsll_vv_u32mf2_tu(vuint32mf2_t vd, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { + return __riscv_vwsll_vv_u32mf2_tu(vd, vs2, vs1, vl); +} + +vuint32mf2_t test_vwsll_vx_u32mf2_tu(vuint32mf2_t vd, vuint16mf4_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_vx_u32mf2_tu(vd, vs2, rs1, vl); +} + +vuint32m1_t test_vwsll_vv_u32m1_tu(vuint32m1_t vd, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { + return __riscv_vwsll_vv_u32m1_tu(vd, vs2, vs1, vl); +} + +vuint32m1_t test_vwsll_vx_u32m1_tu(vuint32m1_t vd, vuint16mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_vx_u32m1_tu(vd, vs2, rs1, vl); +} + +vuint32m2_t test_vwsll_vv_u32m2_tu(vuint32m2_t vd, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vwsll_vv_u32m2_tu(vd, vs2, vs1, vl); +} + +vuint32m2_t test_vwsll_vx_u32m2_tu(vuint32m2_t vd, vuint16m1_t vs2, size_t rs1, size_t vl) { + return 
__riscv_vwsll_vx_u32m2_tu(vd, vs2, rs1, vl); +} + +vuint32m4_t test_vwsll_vv_u32m4_tu(vuint32m4_t vd, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { + return __riscv_vwsll_vv_u32m4_tu(vd, vs2, vs1, vl); +} + +vuint32m4_t test_vwsll_vx_u32m4_tu(vuint32m4_t vd, vuint16m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_vx_u32m4_tu(vd, vs2, rs1, vl); +} + +vuint32m8_t test_vwsll_vv_u32m8_tu(vuint32m8_t vd, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { + return __riscv_vwsll_vv_u32m8_tu(vd, vs2, vs1, vl); +} + +vuint32m8_t test_vwsll_vx_u32m8_tu(vuint32m8_t vd, vuint16m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_vx_u32m8_tu(vd, vs2, rs1, vl); +} + +vuint64m1_t test_vwsll_vv_u64m1_tu(vuint64m1_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { + return __riscv_vwsll_vv_u64m1_tu(vd, vs2, vs1, vl); +} + +vuint64m1_t test_vwsll_vx_u64m1_tu(vuint64m1_t vd, vuint32mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_vx_u64m1_tu(vd, vs2, rs1, vl); +} + +vuint64m2_t test_vwsll_vv_u64m2_tu(vuint64m2_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vwsll_vv_u64m2_tu(vd, vs2, vs1, vl); +} + +vuint64m2_t test_vwsll_vx_u64m2_tu(vuint64m2_t vd, vuint32m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_vx_u64m2_tu(vd, vs2, rs1, vl); +} + +vuint64m4_t test_vwsll_vv_u64m4_tu(vuint64m4_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { + return __riscv_vwsll_vv_u64m4_tu(vd, vs2, vs1, vl); +} + +vuint64m4_t test_vwsll_vx_u64m4_tu(vuint64m4_t vd, vuint32m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_vx_u64m4_tu(vd, vs2, rs1, vl); +} + +vuint64m8_t test_vwsll_vv_u64m8_tu(vuint64m8_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { + return __riscv_vwsll_vv_u64m8_tu(vd, vs2, vs1, vl); +} + +vuint64m8_t test_vwsll_vx_u64m8_tu(vuint64m8_t vd, vuint32m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_vx_u64m8_tu(vd, vs2, rs1, vl); +} + +vuint16mf4_t test_vwsll_vv_u16mf4_tum(vbool64_t vm, vuint16mf4_t vd, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { + return __riscv_vwsll_vv_u16mf4_tum(vm, vd, vs2, vs1, vl); +} + +vuint16mf4_t test_vwsll_vx_u16mf4_tum(vbool64_t vm, vuint16mf4_t vd, vuint8mf8_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_vx_u16mf4_tum(vm, vd, vs2, rs1, vl); +} + +vuint16mf2_t test_vwsll_vv_u16mf2_tum(vbool32_t vm, vuint16mf2_t vd, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { + return __riscv_vwsll_vv_u16mf2_tum(vm, vd, vs2, vs1, vl); +} + +vuint16mf2_t test_vwsll_vx_u16mf2_tum(vbool32_t vm, vuint16mf2_t vd, vuint8mf4_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_vx_u16mf2_tum(vm, vd, vs2, rs1, vl); +} + +vuint16m1_t test_vwsll_vv_u16m1_tum(vbool16_t vm, vuint16m1_t vd, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { + return __riscv_vwsll_vv_u16m1_tum(vm, vd, vs2, vs1, vl); +} + +vuint16m1_t test_vwsll_vx_u16m1_tum(vbool16_t vm, vuint16m1_t vd, vuint8mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_vx_u16m1_tum(vm, vd, vs2, rs1, vl); +} + +vuint16m2_t test_vwsll_vv_u16m2_tum(vbool8_t vm, vuint16m2_t vd, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vwsll_vv_u16m2_tum(vm, vd, vs2, vs1, vl); +} + +vuint16m2_t test_vwsll_vx_u16m2_tum(vbool8_t vm, vuint16m2_t vd, vuint8m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_vx_u16m2_tum(vm, vd, vs2, rs1, vl); +} + +vuint16m4_t test_vwsll_vv_u16m4_tum(vbool4_t vm, vuint16m4_t vd, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) { + return __riscv_vwsll_vv_u16m4_tum(vm, vd, vs2, vs1, vl); +} + +vuint16m4_t test_vwsll_vx_u16m4_tum(vbool4_t 
vm, vuint16m4_t vd, vuint8m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_vx_u16m4_tum(vm, vd, vs2, rs1, vl); +} + +vuint16m8_t test_vwsll_vv_u16m8_tum(vbool2_t vm, vuint16m8_t vd, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) { + return __riscv_vwsll_vv_u16m8_tum(vm, vd, vs2, vs1, vl); +} + +vuint16m8_t test_vwsll_vx_u16m8_tum(vbool2_t vm, vuint16m8_t vd, vuint8m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_vx_u16m8_tum(vm, vd, vs2, rs1, vl); +} + +vuint32mf2_t test_vwsll_vv_u32mf2_tum(vbool64_t vm, vuint32mf2_t vd, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { + return __riscv_vwsll_vv_u32mf2_tum(vm, vd, vs2, vs1, vl); +} + +vuint32mf2_t test_vwsll_vx_u32mf2_tum(vbool64_t vm, vuint32mf2_t vd, vuint16mf4_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_vx_u32mf2_tum(vm, vd, vs2, rs1, vl); +} + +vuint32m1_t test_vwsll_vv_u32m1_tum(vbool32_t vm, vuint32m1_t vd, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { + return __riscv_vwsll_vv_u32m1_tum(vm, vd, vs2, vs1, vl); +} + +vuint32m1_t test_vwsll_vx_u32m1_tum(vbool32_t vm, vuint32m1_t vd, vuint16mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_vx_u32m1_tum(vm, vd, vs2, rs1, vl); +} + +vuint32m2_t test_vwsll_vv_u32m2_tum(vbool16_t vm, vuint32m2_t vd, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vwsll_vv_u32m2_tum(vm, vd, vs2, vs1, vl); +} + +vuint32m2_t test_vwsll_vx_u32m2_tum(vbool16_t vm, vuint32m2_t vd, vuint16m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_vx_u32m2_tum(vm, vd, vs2, rs1, vl); +} + +vuint32m4_t test_vwsll_vv_u32m4_tum(vbool8_t vm, vuint32m4_t vd, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { + return __riscv_vwsll_vv_u32m4_tum(vm, vd, vs2, vs1, vl); +} + +vuint32m4_t test_vwsll_vx_u32m4_tum(vbool8_t vm, vuint32m4_t vd, vuint16m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_vx_u32m4_tum(vm, vd, vs2, rs1, vl); +} + +vuint32m8_t test_vwsll_vv_u32m8_tum(vbool4_t vm, vuint32m8_t vd, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { + return __riscv_vwsll_vv_u32m8_tum(vm, vd, vs2, vs1, vl); +} + +vuint32m8_t test_vwsll_vx_u32m8_tum(vbool4_t vm, vuint32m8_t vd, vuint16m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_vx_u32m8_tum(vm, vd, vs2, rs1, vl); +} + +vuint64m1_t test_vwsll_vv_u64m1_tum(vbool64_t vm, vuint64m1_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { + return __riscv_vwsll_vv_u64m1_tum(vm, vd, vs2, vs1, vl); +} + +vuint64m1_t test_vwsll_vx_u64m1_tum(vbool64_t vm, vuint64m1_t vd, vuint32mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_vx_u64m1_tum(vm, vd, vs2, rs1, vl); +} + +vuint64m2_t test_vwsll_vv_u64m2_tum(vbool32_t vm, vuint64m2_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vwsll_vv_u64m2_tum(vm, vd, vs2, vs1, vl); +} + +vuint64m2_t test_vwsll_vx_u64m2_tum(vbool32_t vm, vuint64m2_t vd, vuint32m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_vx_u64m2_tum(vm, vd, vs2, rs1, vl); +} + +vuint64m4_t test_vwsll_vv_u64m4_tum(vbool16_t vm, vuint64m4_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { + return __riscv_vwsll_vv_u64m4_tum(vm, vd, vs2, vs1, vl); +} + +vuint64m4_t test_vwsll_vx_u64m4_tum(vbool16_t vm, vuint64m4_t vd, vuint32m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_vx_u64m4_tum(vm, vd, vs2, rs1, vl); +} + +vuint64m8_t test_vwsll_vv_u64m8_tum(vbool8_t vm, vuint64m8_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { + return __riscv_vwsll_vv_u64m8_tum(vm, vd, vs2, vs1, vl); +} + +vuint64m8_t test_vwsll_vx_u64m8_tum(vbool8_t vm, vuint64m8_t vd, vuint32m4_t vs2, 
size_t rs1, size_t vl) { + return __riscv_vwsll_vx_u64m8_tum(vm, vd, vs2, rs1, vl); +} + +vuint16mf4_t test_vwsll_vv_u16mf4_tumu(vbool64_t vm, vuint16mf4_t vd, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { + return __riscv_vwsll_vv_u16mf4_tumu(vm, vd, vs2, vs1, vl); +} + +vuint16mf4_t test_vwsll_vx_u16mf4_tumu(vbool64_t vm, vuint16mf4_t vd, vuint8mf8_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_vx_u16mf4_tumu(vm, vd, vs2, rs1, vl); +} + +vuint16mf2_t test_vwsll_vv_u16mf2_tumu(vbool32_t vm, vuint16mf2_t vd, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { + return __riscv_vwsll_vv_u16mf2_tumu(vm, vd, vs2, vs1, vl); +} + +vuint16mf2_t test_vwsll_vx_u16mf2_tumu(vbool32_t vm, vuint16mf2_t vd, vuint8mf4_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_vx_u16mf2_tumu(vm, vd, vs2, rs1, vl); +} + +vuint16m1_t test_vwsll_vv_u16m1_tumu(vbool16_t vm, vuint16m1_t vd, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { + return __riscv_vwsll_vv_u16m1_tumu(vm, vd, vs2, vs1, vl); +} + +vuint16m1_t test_vwsll_vx_u16m1_tumu(vbool16_t vm, vuint16m1_t vd, vuint8mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_vx_u16m1_tumu(vm, vd, vs2, rs1, vl); +} + +vuint16m2_t test_vwsll_vv_u16m2_tumu(vbool8_t vm, vuint16m2_t vd, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vwsll_vv_u16m2_tumu(vm, vd, vs2, vs1, vl); +} + +vuint16m2_t test_vwsll_vx_u16m2_tumu(vbool8_t vm, vuint16m2_t vd, vuint8m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_vx_u16m2_tumu(vm, vd, vs2, rs1, vl); +} + +vuint16m4_t test_vwsll_vv_u16m4_tumu(vbool4_t vm, vuint16m4_t vd, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) { + return __riscv_vwsll_vv_u16m4_tumu(vm, vd, vs2, vs1, vl); +} + +vuint16m4_t test_vwsll_vx_u16m4_tumu(vbool4_t vm, vuint16m4_t vd, vuint8m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_vx_u16m4_tumu(vm, vd, vs2, rs1, vl); +} + +vuint16m8_t test_vwsll_vv_u16m8_tumu(vbool2_t vm, vuint16m8_t vd, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) { + return __riscv_vwsll_vv_u16m8_tumu(vm, vd, vs2, vs1, vl); +} + +vuint16m8_t test_vwsll_vx_u16m8_tumu(vbool2_t vm, vuint16m8_t vd, vuint8m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_vx_u16m8_tumu(vm, vd, vs2, rs1, vl); +} + +vuint32mf2_t test_vwsll_vv_u32mf2_tumu(vbool64_t vm, vuint32mf2_t vd, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { + return __riscv_vwsll_vv_u32mf2_tumu(vm, vd, vs2, vs1, vl); +} + +vuint32mf2_t test_vwsll_vx_u32mf2_tumu(vbool64_t vm, vuint32mf2_t vd, vuint16mf4_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_vx_u32mf2_tumu(vm, vd, vs2, rs1, vl); +} + +vuint32m1_t test_vwsll_vv_u32m1_tumu(vbool32_t vm, vuint32m1_t vd, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { + return __riscv_vwsll_vv_u32m1_tumu(vm, vd, vs2, vs1, vl); +} + +vuint32m1_t test_vwsll_vx_u32m1_tumu(vbool32_t vm, vuint32m1_t vd, vuint16mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_vx_u32m1_tumu(vm, vd, vs2, rs1, vl); +} + +vuint32m2_t test_vwsll_vv_u32m2_tumu(vbool16_t vm, vuint32m2_t vd, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vwsll_vv_u32m2_tumu(vm, vd, vs2, vs1, vl); +} + +vuint32m2_t test_vwsll_vx_u32m2_tumu(vbool16_t vm, vuint32m2_t vd, vuint16m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_vx_u32m2_tumu(vm, vd, vs2, rs1, vl); +} + +vuint32m4_t test_vwsll_vv_u32m4_tumu(vbool8_t vm, vuint32m4_t vd, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { + return __riscv_vwsll_vv_u32m4_tumu(vm, vd, vs2, vs1, vl); +} + +vuint32m4_t test_vwsll_vx_u32m4_tumu(vbool8_t vm, vuint32m4_t vd, 
vuint16m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_vx_u32m4_tumu(vm, vd, vs2, rs1, vl); +} + +vuint32m8_t test_vwsll_vv_u32m8_tumu(vbool4_t vm, vuint32m8_t vd, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { + return __riscv_vwsll_vv_u32m8_tumu(vm, vd, vs2, vs1, vl); +} + +vuint32m8_t test_vwsll_vx_u32m8_tumu(vbool4_t vm, vuint32m8_t vd, vuint16m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_vx_u32m8_tumu(vm, vd, vs2, rs1, vl); +} + +vuint64m1_t test_vwsll_vv_u64m1_tumu(vbool64_t vm, vuint64m1_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { + return __riscv_vwsll_vv_u64m1_tumu(vm, vd, vs2, vs1, vl); +} + +vuint64m1_t test_vwsll_vx_u64m1_tumu(vbool64_t vm, vuint64m1_t vd, vuint32mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_vx_u64m1_tumu(vm, vd, vs2, rs1, vl); +} + +vuint64m2_t test_vwsll_vv_u64m2_tumu(vbool32_t vm, vuint64m2_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vwsll_vv_u64m2_tumu(vm, vd, vs2, vs1, vl); +} + +vuint64m2_t test_vwsll_vx_u64m2_tumu(vbool32_t vm, vuint64m2_t vd, vuint32m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_vx_u64m2_tumu(vm, vd, vs2, rs1, vl); +} + +vuint64m4_t test_vwsll_vv_u64m4_tumu(vbool16_t vm, vuint64m4_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { + return __riscv_vwsll_vv_u64m4_tumu(vm, vd, vs2, vs1, vl); +} + +vuint64m4_t test_vwsll_vx_u64m4_tumu(vbool16_t vm, vuint64m4_t vd, vuint32m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_vx_u64m4_tumu(vm, vd, vs2, rs1, vl); +} + +vuint64m8_t test_vwsll_vv_u64m8_tumu(vbool8_t vm, vuint64m8_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { + return __riscv_vwsll_vv_u64m8_tumu(vm, vd, vs2, vs1, vl); +} + +vuint64m8_t test_vwsll_vx_u64m8_tumu(vbool8_t vm, vuint64m8_t vd, vuint32m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_vx_u64m8_tumu(vm, vd, vs2, rs1, vl); +} + +vuint16mf4_t test_vwsll_vv_u16mf4_mu(vbool64_t vm, vuint16mf4_t vd, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) { + return __riscv_vwsll_vv_u16mf4_mu(vm, vd, vs2, vs1, vl); +} + +vuint16mf4_t test_vwsll_vx_u16mf4_mu(vbool64_t vm, vuint16mf4_t vd, vuint8mf8_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_vx_u16mf4_mu(vm, vd, vs2, rs1, vl); +} + +vuint16mf2_t test_vwsll_vv_u16mf2_mu(vbool32_t vm, vuint16mf2_t vd, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) { + return __riscv_vwsll_vv_u16mf2_mu(vm, vd, vs2, vs1, vl); +} + +vuint16mf2_t test_vwsll_vx_u16mf2_mu(vbool32_t vm, vuint16mf2_t vd, vuint8mf4_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_vx_u16mf2_mu(vm, vd, vs2, rs1, vl); +} + +vuint16m1_t test_vwsll_vv_u16m1_mu(vbool16_t vm, vuint16m1_t vd, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) { + return __riscv_vwsll_vv_u16m1_mu(vm, vd, vs2, vs1, vl); +} + +vuint16m1_t test_vwsll_vx_u16m1_mu(vbool16_t vm, vuint16m1_t vd, vuint8mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_vx_u16m1_mu(vm, vd, vs2, rs1, vl); +} + +vuint16m2_t test_vwsll_vv_u16m2_mu(vbool8_t vm, vuint16m2_t vd, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) { + return __riscv_vwsll_vv_u16m2_mu(vm, vd, vs2, vs1, vl); +} + +vuint16m2_t test_vwsll_vx_u16m2_mu(vbool8_t vm, vuint16m2_t vd, vuint8m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_vx_u16m2_mu(vm, vd, vs2, rs1, vl); +} + +vuint16m4_t test_vwsll_vv_u16m4_mu(vbool4_t vm, vuint16m4_t vd, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) { + return __riscv_vwsll_vv_u16m4_mu(vm, vd, vs2, vs1, vl); +} + +vuint16m4_t test_vwsll_vx_u16m4_mu(vbool4_t vm, vuint16m4_t vd, vuint8m2_t vs2, size_t rs1, size_t 
vl) { + return __riscv_vwsll_vx_u16m4_mu(vm, vd, vs2, rs1, vl); +} + +vuint16m8_t test_vwsll_vv_u16m8_mu(vbool2_t vm, vuint16m8_t vd, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) { + return __riscv_vwsll_vv_u16m8_mu(vm, vd, vs2, vs1, vl); +} + +vuint16m8_t test_vwsll_vx_u16m8_mu(vbool2_t vm, vuint16m8_t vd, vuint8m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_vx_u16m8_mu(vm, vd, vs2, rs1, vl); +} + +vuint32mf2_t test_vwsll_vv_u32mf2_mu(vbool64_t vm, vuint32mf2_t vd, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) { + return __riscv_vwsll_vv_u32mf2_mu(vm, vd, vs2, vs1, vl); +} + +vuint32mf2_t test_vwsll_vx_u32mf2_mu(vbool64_t vm, vuint32mf2_t vd, vuint16mf4_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_vx_u32mf2_mu(vm, vd, vs2, rs1, vl); +} + +vuint32m1_t test_vwsll_vv_u32m1_mu(vbool32_t vm, vuint32m1_t vd, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) { + return __riscv_vwsll_vv_u32m1_mu(vm, vd, vs2, vs1, vl); +} + +vuint32m1_t test_vwsll_vx_u32m1_mu(vbool32_t vm, vuint32m1_t vd, vuint16mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_vx_u32m1_mu(vm, vd, vs2, rs1, vl); +} + +vuint32m2_t test_vwsll_vv_u32m2_mu(vbool16_t vm, vuint32m2_t vd, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) { + return __riscv_vwsll_vv_u32m2_mu(vm, vd, vs2, vs1, vl); +} + +vuint32m2_t test_vwsll_vx_u32m2_mu(vbool16_t vm, vuint32m2_t vd, vuint16m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_vx_u32m2_mu(vm, vd, vs2, rs1, vl); +} + +vuint32m4_t test_vwsll_vv_u32m4_mu(vbool8_t vm, vuint32m4_t vd, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) { + return __riscv_vwsll_vv_u32m4_mu(vm, vd, vs2, vs1, vl); +} + +vuint32m4_t test_vwsll_vx_u32m4_mu(vbool8_t vm, vuint32m4_t vd, vuint16m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_vx_u32m4_mu(vm, vd, vs2, rs1, vl); +} + +vuint32m8_t test_vwsll_vv_u32m8_mu(vbool4_t vm, vuint32m8_t vd, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) { + return __riscv_vwsll_vv_u32m8_mu(vm, vd, vs2, vs1, vl); +} + +vuint32m8_t test_vwsll_vx_u32m8_mu(vbool4_t vm, vuint32m8_t vd, vuint16m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_vx_u32m8_mu(vm, vd, vs2, rs1, vl); +} + +vuint64m1_t test_vwsll_vv_u64m1_mu(vbool64_t vm, vuint64m1_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) { + return __riscv_vwsll_vv_u64m1_mu(vm, vd, vs2, vs1, vl); +} + +vuint64m1_t test_vwsll_vx_u64m1_mu(vbool64_t vm, vuint64m1_t vd, vuint32mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_vx_u64m1_mu(vm, vd, vs2, rs1, vl); +} + +vuint64m2_t test_vwsll_vv_u64m2_mu(vbool32_t vm, vuint64m2_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) { + return __riscv_vwsll_vv_u64m2_mu(vm, vd, vs2, vs1, vl); +} + +vuint64m2_t test_vwsll_vx_u64m2_mu(vbool32_t vm, vuint64m2_t vd, vuint32m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_vx_u64m2_mu(vm, vd, vs2, rs1, vl); +} + +vuint64m4_t test_vwsll_vv_u64m4_mu(vbool16_t vm, vuint64m4_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) { + return __riscv_vwsll_vv_u64m4_mu(vm, vd, vs2, vs1, vl); +} + +vuint64m4_t test_vwsll_vx_u64m4_mu(vbool16_t vm, vuint64m4_t vd, vuint32m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_vx_u64m4_mu(vm, vd, vs2, rs1, vl); +} + +vuint64m8_t test_vwsll_vv_u64m8_mu(vbool8_t vm, vuint64m8_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) { + return __riscv_vwsll_vv_u64m8_mu(vm, vd, vs2, vs1, vl); +} + +vuint64m8_t test_vwsll_vx_u64m8_mu(vbool8_t vm, vuint64m8_t vd, vuint32m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_vx_u64m8_mu(vm, vd, vs2, rs1, vl); +} diff --git 
a/auto-generated/vector-crypto/policy_funcs/llvm-overloaded-tests/vaesdf.c b/auto-generated/vector-crypto/policy_funcs/llvm-overloaded-tests/vaesdf.c new file mode 100644 index 000000000..6d2504f27 --- /dev/null +++ b/auto-generated/vector-crypto/policy_funcs/llvm-overloaded-tests/vaesdf.c @@ -0,0 +1,108 @@ +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zvl512b \ +// RUN: -target-feature +zvbb \ +// RUN: -target-feature +zvbc \ +// RUN: -target-feature +zvkg \ +// RUN: -target-feature +zvkned \ +// RUN: -target-feature +zvknhb \ +// RUN: -target-feature +zvksed \ +// RUN: -target-feature +zvksh -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include + +vuint32mf2_t test_vaesdf_vv_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, + size_t vl) { + return __riscv_vaesdf_vv_tu(vd, vs2, vl); +} + +vuint32mf2_t test_vaesdf_vs_u32mf2_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, + size_t vl) { + return __riscv_vaesdf_vs_tu(vd, vs2, vl); +} + +vuint32m1_t test_vaesdf_vs_u32mf2_u32m1_tu(vuint32m1_t vd, vuint32mf2_t vs2, + size_t vl) { + return __riscv_vaesdf_vs_tu(vd, vs2, vl); +} + +vuint32m2_t test_vaesdf_vs_u32mf2_u32m2_tu(vuint32m2_t vd, vuint32mf2_t vs2, + size_t vl) { + return __riscv_vaesdf_vs_tu(vd, vs2, vl); +} + +vuint32m4_t test_vaesdf_vs_u32mf2_u32m4_tu(vuint32m4_t vd, vuint32mf2_t vs2, + size_t vl) { + return __riscv_vaesdf_vs_tu(vd, vs2, vl); +} + +vuint32m8_t test_vaesdf_vs_u32mf2_u32m8_tu(vuint32m8_t vd, vuint32mf2_t vs2, + size_t vl) { + return __riscv_vaesdf_vs_tu(vd, vs2, vl); +} + +vuint32m1_t test_vaesdf_vv_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, + size_t vl) { + return __riscv_vaesdf_vv_tu(vd, vs2, vl); +} + +vuint32m1_t test_vaesdf_vs_u32m1_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, + size_t vl) { + return __riscv_vaesdf_vs_tu(vd, vs2, vl); +} + +vuint32m2_t test_vaesdf_vs_u32m1_u32m2_tu(vuint32m2_t vd, vuint32m1_t vs2, + size_t vl) { + return __riscv_vaesdf_vs_tu(vd, vs2, vl); +} + +vuint32m4_t test_vaesdf_vs_u32m1_u32m4_tu(vuint32m4_t vd, vuint32m1_t vs2, + size_t vl) { + return __riscv_vaesdf_vs_tu(vd, vs2, vl); +} + +vuint32m8_t test_vaesdf_vs_u32m1_u32m8_tu(vuint32m8_t vd, vuint32m1_t vs2, + size_t vl) { + return __riscv_vaesdf_vs_tu(vd, vs2, vl); +} + +vuint32m2_t test_vaesdf_vv_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, + size_t vl) { + return __riscv_vaesdf_vv_tu(vd, vs2, vl); +} + +vuint32m2_t test_vaesdf_vs_u32m2_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, + size_t vl) { + return __riscv_vaesdf_vs_tu(vd, vs2, vl); +} + +vuint32m4_t test_vaesdf_vs_u32m2_u32m4_tu(vuint32m4_t vd, vuint32m2_t vs2, + size_t vl) { + return __riscv_vaesdf_vs_tu(vd, vs2, vl); +} + +vuint32m8_t test_vaesdf_vs_u32m2_u32m8_tu(vuint32m8_t vd, vuint32m2_t vs2, + size_t vl) { + return __riscv_vaesdf_vs_tu(vd, vs2, vl); +} + +vuint32m4_t test_vaesdf_vv_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, + size_t vl) { + return __riscv_vaesdf_vv_tu(vd, vs2, vl); +} + +vuint32m4_t test_vaesdf_vs_u32m4_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, + size_t vl) { + return __riscv_vaesdf_vs_tu(vd, vs2, vl); +} + +vuint32m8_t test_vaesdf_vs_u32m4_u32m8_tu(vuint32m8_t vd, vuint32m4_t vs2, + size_t vl) { + return __riscv_vaesdf_vs_tu(vd, vs2, vl); +} + +vuint32m8_t test_vaesdf_vv_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, + size_t vl) { + return __riscv_vaesdf_vv_tu(vd, vs2, vl); +} diff --git a/auto-generated/vector-crypto/policy_funcs/llvm-overloaded-tests/vaesdm.c 
b/auto-generated/vector-crypto/policy_funcs/llvm-overloaded-tests/vaesdm.c new file mode 100644 index 000000000..1fa488b00 --- /dev/null +++ b/auto-generated/vector-crypto/policy_funcs/llvm-overloaded-tests/vaesdm.c @@ -0,0 +1,108 @@ +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zvl512b \ +// RUN: -target-feature +zvbb \ +// RUN: -target-feature +zvbc \ +// RUN: -target-feature +zvkg \ +// RUN: -target-feature +zvkned \ +// RUN: -target-feature +zvknhb \ +// RUN: -target-feature +zvksed \ +// RUN: -target-feature +zvksh -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include + +vuint32mf2_t test_vaesdm_vv_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, + size_t vl) { + return __riscv_vaesdm_vv_tu(vd, vs2, vl); +} + +vuint32mf2_t test_vaesdm_vs_u32mf2_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, + size_t vl) { + return __riscv_vaesdm_vs_tu(vd, vs2, vl); +} + +vuint32m1_t test_vaesdm_vs_u32mf2_u32m1_tu(vuint32m1_t vd, vuint32mf2_t vs2, + size_t vl) { + return __riscv_vaesdm_vs_tu(vd, vs2, vl); +} + +vuint32m2_t test_vaesdm_vs_u32mf2_u32m2_tu(vuint32m2_t vd, vuint32mf2_t vs2, + size_t vl) { + return __riscv_vaesdm_vs_tu(vd, vs2, vl); +} + +vuint32m4_t test_vaesdm_vs_u32mf2_u32m4_tu(vuint32m4_t vd, vuint32mf2_t vs2, + size_t vl) { + return __riscv_vaesdm_vs_tu(vd, vs2, vl); +} + +vuint32m8_t test_vaesdm_vs_u32mf2_u32m8_tu(vuint32m8_t vd, vuint32mf2_t vs2, + size_t vl) { + return __riscv_vaesdm_vs_tu(vd, vs2, vl); +} + +vuint32m1_t test_vaesdm_vv_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, + size_t vl) { + return __riscv_vaesdm_vv_tu(vd, vs2, vl); +} + +vuint32m1_t test_vaesdm_vs_u32m1_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, + size_t vl) { + return __riscv_vaesdm_vs_tu(vd, vs2, vl); +} + +vuint32m2_t test_vaesdm_vs_u32m1_u32m2_tu(vuint32m2_t vd, vuint32m1_t vs2, + size_t vl) { + return __riscv_vaesdm_vs_tu(vd, vs2, vl); +} + +vuint32m4_t test_vaesdm_vs_u32m1_u32m4_tu(vuint32m4_t vd, vuint32m1_t vs2, + size_t vl) { + return __riscv_vaesdm_vs_tu(vd, vs2, vl); +} + +vuint32m8_t test_vaesdm_vs_u32m1_u32m8_tu(vuint32m8_t vd, vuint32m1_t vs2, + size_t vl) { + return __riscv_vaesdm_vs_tu(vd, vs2, vl); +} + +vuint32m2_t test_vaesdm_vv_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, + size_t vl) { + return __riscv_vaesdm_vv_tu(vd, vs2, vl); +} + +vuint32m2_t test_vaesdm_vs_u32m2_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, + size_t vl) { + return __riscv_vaesdm_vs_tu(vd, vs2, vl); +} + +vuint32m4_t test_vaesdm_vs_u32m2_u32m4_tu(vuint32m4_t vd, vuint32m2_t vs2, + size_t vl) { + return __riscv_vaesdm_vs_tu(vd, vs2, vl); +} + +vuint32m8_t test_vaesdm_vs_u32m2_u32m8_tu(vuint32m8_t vd, vuint32m2_t vs2, + size_t vl) { + return __riscv_vaesdm_vs_tu(vd, vs2, vl); +} + +vuint32m4_t test_vaesdm_vv_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, + size_t vl) { + return __riscv_vaesdm_vv_tu(vd, vs2, vl); +} + +vuint32m4_t test_vaesdm_vs_u32m4_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, + size_t vl) { + return __riscv_vaesdm_vs_tu(vd, vs2, vl); +} + +vuint32m8_t test_vaesdm_vs_u32m4_u32m8_tu(vuint32m8_t vd, vuint32m4_t vs2, + size_t vl) { + return __riscv_vaesdm_vs_tu(vd, vs2, vl); +} + +vuint32m8_t test_vaesdm_vv_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, + size_t vl) { + return __riscv_vaesdm_vv_tu(vd, vs2, vl); +} diff --git a/auto-generated/vector-crypto/policy_funcs/llvm-overloaded-tests/vaesef.c b/auto-generated/vector-crypto/policy_funcs/llvm-overloaded-tests/vaesef.c new file mode 
100644 index 000000000..5635721bb --- /dev/null +++ b/auto-generated/vector-crypto/policy_funcs/llvm-overloaded-tests/vaesef.c @@ -0,0 +1,108 @@ +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zvl512b \ +// RUN: -target-feature +zvbb \ +// RUN: -target-feature +zvbc \ +// RUN: -target-feature +zvkg \ +// RUN: -target-feature +zvkned \ +// RUN: -target-feature +zvknhb \ +// RUN: -target-feature +zvksed \ +// RUN: -target-feature +zvksh -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include + +vuint32mf2_t test_vaesef_vv_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, + size_t vl) { + return __riscv_vaesef_vv_tu(vd, vs2, vl); +} + +vuint32mf2_t test_vaesef_vs_u32mf2_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, + size_t vl) { + return __riscv_vaesef_vs_tu(vd, vs2, vl); +} + +vuint32m1_t test_vaesef_vs_u32mf2_u32m1_tu(vuint32m1_t vd, vuint32mf2_t vs2, + size_t vl) { + return __riscv_vaesef_vs_tu(vd, vs2, vl); +} + +vuint32m2_t test_vaesef_vs_u32mf2_u32m2_tu(vuint32m2_t vd, vuint32mf2_t vs2, + size_t vl) { + return __riscv_vaesef_vs_tu(vd, vs2, vl); +} + +vuint32m4_t test_vaesef_vs_u32mf2_u32m4_tu(vuint32m4_t vd, vuint32mf2_t vs2, + size_t vl) { + return __riscv_vaesef_vs_tu(vd, vs2, vl); +} + +vuint32m8_t test_vaesef_vs_u32mf2_u32m8_tu(vuint32m8_t vd, vuint32mf2_t vs2, + size_t vl) { + return __riscv_vaesef_vs_tu(vd, vs2, vl); +} + +vuint32m1_t test_vaesef_vv_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, + size_t vl) { + return __riscv_vaesef_vv_tu(vd, vs2, vl); +} + +vuint32m1_t test_vaesef_vs_u32m1_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, + size_t vl) { + return __riscv_vaesef_vs_tu(vd, vs2, vl); +} + +vuint32m2_t test_vaesef_vs_u32m1_u32m2_tu(vuint32m2_t vd, vuint32m1_t vs2, + size_t vl) { + return __riscv_vaesef_vs_tu(vd, vs2, vl); +} + +vuint32m4_t test_vaesef_vs_u32m1_u32m4_tu(vuint32m4_t vd, vuint32m1_t vs2, + size_t vl) { + return __riscv_vaesef_vs_tu(vd, vs2, vl); +} + +vuint32m8_t test_vaesef_vs_u32m1_u32m8_tu(vuint32m8_t vd, vuint32m1_t vs2, + size_t vl) { + return __riscv_vaesef_vs_tu(vd, vs2, vl); +} + +vuint32m2_t test_vaesef_vv_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, + size_t vl) { + return __riscv_vaesef_vv_tu(vd, vs2, vl); +} + +vuint32m2_t test_vaesef_vs_u32m2_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, + size_t vl) { + return __riscv_vaesef_vs_tu(vd, vs2, vl); +} + +vuint32m4_t test_vaesef_vs_u32m2_u32m4_tu(vuint32m4_t vd, vuint32m2_t vs2, + size_t vl) { + return __riscv_vaesef_vs_tu(vd, vs2, vl); +} + +vuint32m8_t test_vaesef_vs_u32m2_u32m8_tu(vuint32m8_t vd, vuint32m2_t vs2, + size_t vl) { + return __riscv_vaesef_vs_tu(vd, vs2, vl); +} + +vuint32m4_t test_vaesef_vv_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, + size_t vl) { + return __riscv_vaesef_vv_tu(vd, vs2, vl); +} + +vuint32m4_t test_vaesef_vs_u32m4_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, + size_t vl) { + return __riscv_vaesef_vs_tu(vd, vs2, vl); +} + +vuint32m8_t test_vaesef_vs_u32m4_u32m8_tu(vuint32m8_t vd, vuint32m4_t vs2, + size_t vl) { + return __riscv_vaesef_vs_tu(vd, vs2, vl); +} + +vuint32m8_t test_vaesef_vv_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, + size_t vl) { + return __riscv_vaesef_vv_tu(vd, vs2, vl); +} diff --git a/auto-generated/vector-crypto/policy_funcs/llvm-overloaded-tests/vaesem.c b/auto-generated/vector-crypto/policy_funcs/llvm-overloaded-tests/vaesem.c new file mode 100644 index 000000000..a7f05f2b8 --- /dev/null +++ 
b/auto-generated/vector-crypto/policy_funcs/llvm-overloaded-tests/vaesem.c @@ -0,0 +1,108 @@ +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zvl512b \ +// RUN: -target-feature +zvbb \ +// RUN: -target-feature +zvbc \ +// RUN: -target-feature +zvkg \ +// RUN: -target-feature +zvkned \ +// RUN: -target-feature +zvknhb \ +// RUN: -target-feature +zvksed \ +// RUN: -target-feature +zvksh -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include + +vuint32mf2_t test_vaesem_vv_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, + size_t vl) { + return __riscv_vaesem_vv_tu(vd, vs2, vl); +} + +vuint32mf2_t test_vaesem_vs_u32mf2_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, + size_t vl) { + return __riscv_vaesem_vs_tu(vd, vs2, vl); +} + +vuint32m1_t test_vaesem_vs_u32mf2_u32m1_tu(vuint32m1_t vd, vuint32mf2_t vs2, + size_t vl) { + return __riscv_vaesem_vs_tu(vd, vs2, vl); +} + +vuint32m2_t test_vaesem_vs_u32mf2_u32m2_tu(vuint32m2_t vd, vuint32mf2_t vs2, + size_t vl) { + return __riscv_vaesem_vs_tu(vd, vs2, vl); +} + +vuint32m4_t test_vaesem_vs_u32mf2_u32m4_tu(vuint32m4_t vd, vuint32mf2_t vs2, + size_t vl) { + return __riscv_vaesem_vs_tu(vd, vs2, vl); +} + +vuint32m8_t test_vaesem_vs_u32mf2_u32m8_tu(vuint32m8_t vd, vuint32mf2_t vs2, + size_t vl) { + return __riscv_vaesem_vs_tu(vd, vs2, vl); +} + +vuint32m1_t test_vaesem_vv_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, + size_t vl) { + return __riscv_vaesem_vv_tu(vd, vs2, vl); +} + +vuint32m1_t test_vaesem_vs_u32m1_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, + size_t vl) { + return __riscv_vaesem_vs_tu(vd, vs2, vl); +} + +vuint32m2_t test_vaesem_vs_u32m1_u32m2_tu(vuint32m2_t vd, vuint32m1_t vs2, + size_t vl) { + return __riscv_vaesem_vs_tu(vd, vs2, vl); +} + +vuint32m4_t test_vaesem_vs_u32m1_u32m4_tu(vuint32m4_t vd, vuint32m1_t vs2, + size_t vl) { + return __riscv_vaesem_vs_tu(vd, vs2, vl); +} + +vuint32m8_t test_vaesem_vs_u32m1_u32m8_tu(vuint32m8_t vd, vuint32m1_t vs2, + size_t vl) { + return __riscv_vaesem_vs_tu(vd, vs2, vl); +} + +vuint32m2_t test_vaesem_vv_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, + size_t vl) { + return __riscv_vaesem_vv_tu(vd, vs2, vl); +} + +vuint32m2_t test_vaesem_vs_u32m2_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, + size_t vl) { + return __riscv_vaesem_vs_tu(vd, vs2, vl); +} + +vuint32m4_t test_vaesem_vs_u32m2_u32m4_tu(vuint32m4_t vd, vuint32m2_t vs2, + size_t vl) { + return __riscv_vaesem_vs_tu(vd, vs2, vl); +} + +vuint32m8_t test_vaesem_vs_u32m2_u32m8_tu(vuint32m8_t vd, vuint32m2_t vs2, + size_t vl) { + return __riscv_vaesem_vs_tu(vd, vs2, vl); +} + +vuint32m4_t test_vaesem_vv_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, + size_t vl) { + return __riscv_vaesem_vv_tu(vd, vs2, vl); +} + +vuint32m4_t test_vaesem_vs_u32m4_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, + size_t vl) { + return __riscv_vaesem_vs_tu(vd, vs2, vl); +} + +vuint32m8_t test_vaesem_vs_u32m4_u32m8_tu(vuint32m8_t vd, vuint32m4_t vs2, + size_t vl) { + return __riscv_vaesem_vs_tu(vd, vs2, vl); +} + +vuint32m8_t test_vaesem_vv_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, + size_t vl) { + return __riscv_vaesem_vv_tu(vd, vs2, vl); +} diff --git a/auto-generated/vector-crypto/policy_funcs/llvm-overloaded-tests/vaeskf1.c b/auto-generated/vector-crypto/policy_funcs/llvm-overloaded-tests/vaeskf1.c new file mode 100644 index 000000000..c3f94c976 --- /dev/null +++ b/auto-generated/vector-crypto/policy_funcs/llvm-overloaded-tests/vaeskf1.c @@ -0,0 +1,38 
@@ +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zvl512b \ +// RUN: -target-feature +zvbb \ +// RUN: -target-feature +zvbc \ +// RUN: -target-feature +zvkg \ +// RUN: -target-feature +zvkned \ +// RUN: -target-feature +zvknhb \ +// RUN: -target-feature +zvksed \ +// RUN: -target-feature +zvksh -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include + +vuint32mf2_t test_vaeskf1_vi_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, + size_t vl) { + return __riscv_vaeskf1_tu(vd, vs2, 0, vl); +} + +vuint32m1_t test_vaeskf1_vi_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, + size_t vl) { + return __riscv_vaeskf1_tu(vd, vs2, 0, vl); +} + +vuint32m2_t test_vaeskf1_vi_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, + size_t vl) { + return __riscv_vaeskf1_tu(vd, vs2, 0, vl); +} + +vuint32m4_t test_vaeskf1_vi_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, + size_t vl) { + return __riscv_vaeskf1_tu(vd, vs2, 0, vl); +} + +vuint32m8_t test_vaeskf1_vi_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, + size_t vl) { + return __riscv_vaeskf1_tu(vd, vs2, 0, vl); +} diff --git a/auto-generated/vector-crypto/policy_funcs/llvm-overloaded-tests/vaeskf2.c b/auto-generated/vector-crypto/policy_funcs/llvm-overloaded-tests/vaeskf2.c new file mode 100644 index 000000000..2df41ac05 --- /dev/null +++ b/auto-generated/vector-crypto/policy_funcs/llvm-overloaded-tests/vaeskf2.c @@ -0,0 +1,38 @@ +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zvl512b \ +// RUN: -target-feature +zvbb \ +// RUN: -target-feature +zvbc \ +// RUN: -target-feature +zvkg \ +// RUN: -target-feature +zvkned \ +// RUN: -target-feature +zvknhb \ +// RUN: -target-feature +zvksed \ +// RUN: -target-feature +zvksh -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include + +vuint32mf2_t test_vaeskf2_vi_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, + size_t vl) { + return __riscv_vaeskf2_tu(vd, vs2, 0, vl); +} + +vuint32m1_t test_vaeskf2_vi_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, + size_t vl) { + return __riscv_vaeskf2_tu(vd, vs2, 0, vl); +} + +vuint32m2_t test_vaeskf2_vi_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, + size_t vl) { + return __riscv_vaeskf2_tu(vd, vs2, 0, vl); +} + +vuint32m4_t test_vaeskf2_vi_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, + size_t vl) { + return __riscv_vaeskf2_tu(vd, vs2, 0, vl); +} + +vuint32m8_t test_vaeskf2_vi_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, + size_t vl) { + return __riscv_vaeskf2_tu(vd, vs2, 0, vl); +} diff --git a/auto-generated/vector-crypto/policy_funcs/llvm-overloaded-tests/vaesz.c b/auto-generated/vector-crypto/policy_funcs/llvm-overloaded-tests/vaesz.c new file mode 100644 index 000000000..877402aee --- /dev/null +++ b/auto-generated/vector-crypto/policy_funcs/llvm-overloaded-tests/vaesz.c @@ -0,0 +1,83 @@ +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zvl512b \ +// RUN: -target-feature +zvbb \ +// RUN: -target-feature +zvbc \ +// RUN: -target-feature +zvkg \ +// RUN: -target-feature +zvkned \ +// RUN: -target-feature +zvknhb \ +// RUN: -target-feature +zvksed \ +// RUN: -target-feature +zvksh -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include + +vuint32mf2_t test_vaesz_vs_u32mf2_u32mf2_tu(vuint32mf2_t 
vd, vuint32mf2_t vs2, + size_t vl) { + return __riscv_vaesz_tu(vd, vs2, vl); +} + +vuint32m1_t test_vaesz_vs_u32mf2_u32m1_tu(vuint32m1_t vd, vuint32mf2_t vs2, + size_t vl) { + return __riscv_vaesz_tu(vd, vs2, vl); +} + +vuint32m2_t test_vaesz_vs_u32mf2_u32m2_tu(vuint32m2_t vd, vuint32mf2_t vs2, + size_t vl) { + return __riscv_vaesz_tu(vd, vs2, vl); +} + +vuint32m4_t test_vaesz_vs_u32mf2_u32m4_tu(vuint32m4_t vd, vuint32mf2_t vs2, + size_t vl) { + return __riscv_vaesz_tu(vd, vs2, vl); +} + +vuint32m8_t test_vaesz_vs_u32mf2_u32m8_tu(vuint32m8_t vd, vuint32mf2_t vs2, + size_t vl) { + return __riscv_vaesz_tu(vd, vs2, vl); +} + +vuint32m1_t test_vaesz_vs_u32m1_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, + size_t vl) { + return __riscv_vaesz_tu(vd, vs2, vl); +} + +vuint32m2_t test_vaesz_vs_u32m1_u32m2_tu(vuint32m2_t vd, vuint32m1_t vs2, + size_t vl) { + return __riscv_vaesz_tu(vd, vs2, vl); +} + +vuint32m4_t test_vaesz_vs_u32m1_u32m4_tu(vuint32m4_t vd, vuint32m1_t vs2, + size_t vl) { + return __riscv_vaesz_tu(vd, vs2, vl); +} + +vuint32m8_t test_vaesz_vs_u32m1_u32m8_tu(vuint32m8_t vd, vuint32m1_t vs2, + size_t vl) { + return __riscv_vaesz_tu(vd, vs2, vl); +} + +vuint32m2_t test_vaesz_vs_u32m2_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, + size_t vl) { + return __riscv_vaesz_tu(vd, vs2, vl); +} + +vuint32m4_t test_vaesz_vs_u32m2_u32m4_tu(vuint32m4_t vd, vuint32m2_t vs2, + size_t vl) { + return __riscv_vaesz_tu(vd, vs2, vl); +} + +vuint32m8_t test_vaesz_vs_u32m2_u32m8_tu(vuint32m8_t vd, vuint32m2_t vs2, + size_t vl) { + return __riscv_vaesz_tu(vd, vs2, vl); +} + +vuint32m4_t test_vaesz_vs_u32m4_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, + size_t vl) { + return __riscv_vaesz_tu(vd, vs2, vl); +} + +vuint32m8_t test_vaesz_vs_u32m4_u32m8_tu(vuint32m8_t vd, vuint32m4_t vs2, + size_t vl) { + return __riscv_vaesz_tu(vd, vs2, vl); +} diff --git a/auto-generated/vector-crypto/policy_funcs/llvm-overloaded-tests/vandn.c b/auto-generated/vector-crypto/policy_funcs/llvm-overloaded-tests/vandn.c new file mode 100644 index 000000000..af084405c --- /dev/null +++ b/auto-generated/vector-crypto/policy_funcs/llvm-overloaded-tests/vandn.c @@ -0,0 +1,950 @@ +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zvl512b \ +// RUN: -target-feature +zvbb \ +// RUN: -target-feature +zvbc \ +// RUN: -target-feature +zvkg \ +// RUN: -target-feature +zvkned \ +// RUN: -target-feature +zvknhb \ +// RUN: -target-feature +zvksed \ +// RUN: -target-feature +zvksh -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include + +vuint8mf8_t test_vandn_vv_u8mf8_tu(vuint8mf8_t vd, vuint8mf8_t vs2, + vuint8mf8_t vs1, size_t vl) { + return __riscv_vandn_tu(vd, vs2, vs1, vl); +} + +vuint8mf8_t test_vandn_vx_u8mf8_tu(vuint8mf8_t vd, vuint8mf8_t vs2, uint8_t rs1, + size_t vl) { + return __riscv_vandn_tu(vd, vs2, rs1, vl); +} + +vuint8mf4_t test_vandn_vv_u8mf4_tu(vuint8mf4_t vd, vuint8mf4_t vs2, + vuint8mf4_t vs1, size_t vl) { + return __riscv_vandn_tu(vd, vs2, vs1, vl); +} + +vuint8mf4_t test_vandn_vx_u8mf4_tu(vuint8mf4_t vd, vuint8mf4_t vs2, uint8_t rs1, + size_t vl) { + return __riscv_vandn_tu(vd, vs2, rs1, vl); +} + +vuint8mf2_t test_vandn_vv_u8mf2_tu(vuint8mf2_t vd, vuint8mf2_t vs2, + vuint8mf2_t vs1, size_t vl) { + return __riscv_vandn_tu(vd, vs2, vs1, vl); +} + +vuint8mf2_t test_vandn_vx_u8mf2_tu(vuint8mf2_t vd, vuint8mf2_t vs2, uint8_t rs1, + size_t vl) { + return __riscv_vandn_tu(vd, vs2, rs1, vl); +} + 
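// The _tu (tail-undisturbed) overloads above keep destination elements at
// index >= vl from vd; only the first vl elements are written with
// vs2 & ~vs1 (or vs2 & ~rs1 for the .vx form). A minimal usage sketch, not
// part of the generated tests, assuming only the standard riscv_vector.h
// intrinsics already exercised in this file; the helper name and buffers are
// illustrative:

#include <riscv_vector.h>
#include <stddef.h>
#include <stdint.h>

/* dst[i] = a[i] & ~b[i] for n bytes, strip-mined vl elements at a time. */
static void andn_bytes(uint8_t *dst, const uint8_t *a, const uint8_t *b,
                       size_t n) {
  for (size_t i = 0; i < n;) {
    size_t vl = __riscv_vsetvl_e8m1(n - i);            /* elements this pass */
    vuint8m1_t va = __riscv_vle8_v_u8m1(a + i, vl);    /* vs2 operand */
    vuint8m1_t vb = __riscv_vle8_v_u8m1(b + i, vl);    /* vs1 operand */
    vuint8m1_t vr = __riscv_vandn_vv_u8m1(va, vb, vl); /* va & ~vb */
    __riscv_vse8_v_u8m1(dst + i, vr, vl);
    i += vl;
  }
}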
+vuint8m1_t test_vandn_vv_u8m1_tu(vuint8m1_t vd, vuint8m1_t vs2, vuint8m1_t vs1, + size_t vl) { + return __riscv_vandn_tu(vd, vs2, vs1, vl); +} + +vuint8m1_t test_vandn_vx_u8m1_tu(vuint8m1_t vd, vuint8m1_t vs2, uint8_t rs1, + size_t vl) { + return __riscv_vandn_tu(vd, vs2, rs1, vl); +} + +vuint8m2_t test_vandn_vv_u8m2_tu(vuint8m2_t vd, vuint8m2_t vs2, vuint8m2_t vs1, + size_t vl) { + return __riscv_vandn_tu(vd, vs2, vs1, vl); +} + +vuint8m2_t test_vandn_vx_u8m2_tu(vuint8m2_t vd, vuint8m2_t vs2, uint8_t rs1, + size_t vl) { + return __riscv_vandn_tu(vd, vs2, rs1, vl); +} + +vuint8m4_t test_vandn_vv_u8m4_tu(vuint8m4_t vd, vuint8m4_t vs2, vuint8m4_t vs1, + size_t vl) { + return __riscv_vandn_tu(vd, vs2, vs1, vl); +} + +vuint8m4_t test_vandn_vx_u8m4_tu(vuint8m4_t vd, vuint8m4_t vs2, uint8_t rs1, + size_t vl) { + return __riscv_vandn_tu(vd, vs2, rs1, vl); +} + +vuint8m8_t test_vandn_vv_u8m8_tu(vuint8m8_t vd, vuint8m8_t vs2, vuint8m8_t vs1, + size_t vl) { + return __riscv_vandn_tu(vd, vs2, vs1, vl); +} + +vuint8m8_t test_vandn_vx_u8m8_tu(vuint8m8_t vd, vuint8m8_t vs2, uint8_t rs1, + size_t vl) { + return __riscv_vandn_tu(vd, vs2, rs1, vl); +} + +vuint16mf4_t test_vandn_vv_u16mf4_tu(vuint16mf4_t vd, vuint16mf4_t vs2, + vuint16mf4_t vs1, size_t vl) { + return __riscv_vandn_tu(vd, vs2, vs1, vl); +} + +vuint16mf4_t test_vandn_vx_u16mf4_tu(vuint16mf4_t vd, vuint16mf4_t vs2, + uint16_t rs1, size_t vl) { + return __riscv_vandn_tu(vd, vs2, rs1, vl); +} + +vuint16mf2_t test_vandn_vv_u16mf2_tu(vuint16mf2_t vd, vuint16mf2_t vs2, + vuint16mf2_t vs1, size_t vl) { + return __riscv_vandn_tu(vd, vs2, vs1, vl); +} + +vuint16mf2_t test_vandn_vx_u16mf2_tu(vuint16mf2_t vd, vuint16mf2_t vs2, + uint16_t rs1, size_t vl) { + return __riscv_vandn_tu(vd, vs2, rs1, vl); +} + +vuint16m1_t test_vandn_vv_u16m1_tu(vuint16m1_t vd, vuint16m1_t vs2, + vuint16m1_t vs1, size_t vl) { + return __riscv_vandn_tu(vd, vs2, vs1, vl); +} + +vuint16m1_t test_vandn_vx_u16m1_tu(vuint16m1_t vd, vuint16m1_t vs2, + uint16_t rs1, size_t vl) { + return __riscv_vandn_tu(vd, vs2, rs1, vl); +} + +vuint16m2_t test_vandn_vv_u16m2_tu(vuint16m2_t vd, vuint16m2_t vs2, + vuint16m2_t vs1, size_t vl) { + return __riscv_vandn_tu(vd, vs2, vs1, vl); +} + +vuint16m2_t test_vandn_vx_u16m2_tu(vuint16m2_t vd, vuint16m2_t vs2, + uint16_t rs1, size_t vl) { + return __riscv_vandn_tu(vd, vs2, rs1, vl); +} + +vuint16m4_t test_vandn_vv_u16m4_tu(vuint16m4_t vd, vuint16m4_t vs2, + vuint16m4_t vs1, size_t vl) { + return __riscv_vandn_tu(vd, vs2, vs1, vl); +} + +vuint16m4_t test_vandn_vx_u16m4_tu(vuint16m4_t vd, vuint16m4_t vs2, + uint16_t rs1, size_t vl) { + return __riscv_vandn_tu(vd, vs2, rs1, vl); +} + +vuint16m8_t test_vandn_vv_u16m8_tu(vuint16m8_t vd, vuint16m8_t vs2, + vuint16m8_t vs1, size_t vl) { + return __riscv_vandn_tu(vd, vs2, vs1, vl); +} + +vuint16m8_t test_vandn_vx_u16m8_tu(vuint16m8_t vd, vuint16m8_t vs2, + uint16_t rs1, size_t vl) { + return __riscv_vandn_tu(vd, vs2, rs1, vl); +} + +vuint32mf2_t test_vandn_vv_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, + vuint32mf2_t vs1, size_t vl) { + return __riscv_vandn_tu(vd, vs2, vs1, vl); +} + +vuint32mf2_t test_vandn_vx_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, + uint32_t rs1, size_t vl) { + return __riscv_vandn_tu(vd, vs2, rs1, vl); +} + +vuint32m1_t test_vandn_vv_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, + vuint32m1_t vs1, size_t vl) { + return __riscv_vandn_tu(vd, vs2, vs1, vl); +} + +vuint32m1_t test_vandn_vx_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, + uint32_t rs1, size_t vl) { + return __riscv_vandn_tu(vd, 
vs2, rs1, vl); +} + +vuint32m2_t test_vandn_vv_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, + vuint32m2_t vs1, size_t vl) { + return __riscv_vandn_tu(vd, vs2, vs1, vl); +} + +vuint32m2_t test_vandn_vx_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, + uint32_t rs1, size_t vl) { + return __riscv_vandn_tu(vd, vs2, rs1, vl); +} + +vuint32m4_t test_vandn_vv_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, + vuint32m4_t vs1, size_t vl) { + return __riscv_vandn_tu(vd, vs2, vs1, vl); +} + +vuint32m4_t test_vandn_vx_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, + uint32_t rs1, size_t vl) { + return __riscv_vandn_tu(vd, vs2, rs1, vl); +} + +vuint32m8_t test_vandn_vv_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, + vuint32m8_t vs1, size_t vl) { + return __riscv_vandn_tu(vd, vs2, vs1, vl); +} + +vuint32m8_t test_vandn_vx_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, + uint32_t rs1, size_t vl) { + return __riscv_vandn_tu(vd, vs2, rs1, vl); +} + +vuint64m1_t test_vandn_vv_u64m1_tu(vuint64m1_t vd, vuint64m1_t vs2, + vuint64m1_t vs1, size_t vl) { + return __riscv_vandn_tu(vd, vs2, vs1, vl); +} + +vuint64m1_t test_vandn_vx_u64m1_tu(vuint64m1_t vd, vuint64m1_t vs2, + uint64_t rs1, size_t vl) { + return __riscv_vandn_tu(vd, vs2, rs1, vl); +} + +vuint64m2_t test_vandn_vv_u64m2_tu(vuint64m2_t vd, vuint64m2_t vs2, + vuint64m2_t vs1, size_t vl) { + return __riscv_vandn_tu(vd, vs2, vs1, vl); +} + +vuint64m2_t test_vandn_vx_u64m2_tu(vuint64m2_t vd, vuint64m2_t vs2, + uint64_t rs1, size_t vl) { + return __riscv_vandn_tu(vd, vs2, rs1, vl); +} + +vuint64m4_t test_vandn_vv_u64m4_tu(vuint64m4_t vd, vuint64m4_t vs2, + vuint64m4_t vs1, size_t vl) { + return __riscv_vandn_tu(vd, vs2, vs1, vl); +} + +vuint64m4_t test_vandn_vx_u64m4_tu(vuint64m4_t vd, vuint64m4_t vs2, + uint64_t rs1, size_t vl) { + return __riscv_vandn_tu(vd, vs2, rs1, vl); +} + +vuint64m8_t test_vandn_vv_u64m8_tu(vuint64m8_t vd, vuint64m8_t vs2, + vuint64m8_t vs1, size_t vl) { + return __riscv_vandn_tu(vd, vs2, vs1, vl); +} + +vuint64m8_t test_vandn_vx_u64m8_tu(vuint64m8_t vd, vuint64m8_t vs2, + uint64_t rs1, size_t vl) { + return __riscv_vandn_tu(vd, vs2, rs1, vl); +} + +vuint8mf8_t test_vandn_vv_u8mf8_tum(vbool64_t vm, vuint8mf8_t vd, + vuint8mf8_t vs2, vuint8mf8_t vs1, + size_t vl) { + return __riscv_vandn_tum(vm, vd, vs2, vs1, vl); +} + +vuint8mf8_t test_vandn_vx_u8mf8_tum(vbool64_t vm, vuint8mf8_t vd, + vuint8mf8_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vandn_tum(vm, vd, vs2, rs1, vl); +} + +vuint8mf4_t test_vandn_vv_u8mf4_tum(vbool32_t vm, vuint8mf4_t vd, + vuint8mf4_t vs2, vuint8mf4_t vs1, + size_t vl) { + return __riscv_vandn_tum(vm, vd, vs2, vs1, vl); +} + +vuint8mf4_t test_vandn_vx_u8mf4_tum(vbool32_t vm, vuint8mf4_t vd, + vuint8mf4_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vandn_tum(vm, vd, vs2, rs1, vl); +} + +vuint8mf2_t test_vandn_vv_u8mf2_tum(vbool16_t vm, vuint8mf2_t vd, + vuint8mf2_t vs2, vuint8mf2_t vs1, + size_t vl) { + return __riscv_vandn_tum(vm, vd, vs2, vs1, vl); +} + +vuint8mf2_t test_vandn_vx_u8mf2_tum(vbool16_t vm, vuint8mf2_t vd, + vuint8mf2_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vandn_tum(vm, vd, vs2, rs1, vl); +} + +vuint8m1_t test_vandn_vv_u8m1_tum(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, + vuint8m1_t vs1, size_t vl) { + return __riscv_vandn_tum(vm, vd, vs2, vs1, vl); +} + +vuint8m1_t test_vandn_vx_u8m1_tum(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, + uint8_t rs1, size_t vl) { + return __riscv_vandn_tum(vm, vd, vs2, rs1, vl); +} + +vuint8m2_t test_vandn_vv_u8m2_tum(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, + vuint8m2_t 
vs1, size_t vl) { + return __riscv_vandn_tum(vm, vd, vs2, vs1, vl); +} + +vuint8m2_t test_vandn_vx_u8m2_tum(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, + uint8_t rs1, size_t vl) { + return __riscv_vandn_tum(vm, vd, vs2, rs1, vl); +} + +vuint8m4_t test_vandn_vv_u8m4_tum(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, + vuint8m4_t vs1, size_t vl) { + return __riscv_vandn_tum(vm, vd, vs2, vs1, vl); +} + +vuint8m4_t test_vandn_vx_u8m4_tum(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, + uint8_t rs1, size_t vl) { + return __riscv_vandn_tum(vm, vd, vs2, rs1, vl); +} + +vuint8m8_t test_vandn_vv_u8m8_tum(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, + vuint8m8_t vs1, size_t vl) { + return __riscv_vandn_tum(vm, vd, vs2, vs1, vl); +} + +vuint8m8_t test_vandn_vx_u8m8_tum(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, + uint8_t rs1, size_t vl) { + return __riscv_vandn_tum(vm, vd, vs2, rs1, vl); +} + +vuint16mf4_t test_vandn_vv_u16mf4_tum(vbool64_t vm, vuint16mf4_t vd, + vuint16mf4_t vs2, vuint16mf4_t vs1, + size_t vl) { + return __riscv_vandn_tum(vm, vd, vs2, vs1, vl); +} + +vuint16mf4_t test_vandn_vx_u16mf4_tum(vbool64_t vm, vuint16mf4_t vd, + vuint16mf4_t vs2, uint16_t rs1, + size_t vl) { + return __riscv_vandn_tum(vm, vd, vs2, rs1, vl); +} + +vuint16mf2_t test_vandn_vv_u16mf2_tum(vbool32_t vm, vuint16mf2_t vd, + vuint16mf2_t vs2, vuint16mf2_t vs1, + size_t vl) { + return __riscv_vandn_tum(vm, vd, vs2, vs1, vl); +} + +vuint16mf2_t test_vandn_vx_u16mf2_tum(vbool32_t vm, vuint16mf2_t vd, + vuint16mf2_t vs2, uint16_t rs1, + size_t vl) { + return __riscv_vandn_tum(vm, vd, vs2, rs1, vl); +} + +vuint16m1_t test_vandn_vv_u16m1_tum(vbool16_t vm, vuint16m1_t vd, + vuint16m1_t vs2, vuint16m1_t vs1, + size_t vl) { + return __riscv_vandn_tum(vm, vd, vs2, vs1, vl); +} + +vuint16m1_t test_vandn_vx_u16m1_tum(vbool16_t vm, vuint16m1_t vd, + vuint16m1_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vandn_tum(vm, vd, vs2, rs1, vl); +} + +vuint16m2_t test_vandn_vv_u16m2_tum(vbool8_t vm, vuint16m2_t vd, + vuint16m2_t vs2, vuint16m2_t vs1, + size_t vl) { + return __riscv_vandn_tum(vm, vd, vs2, vs1, vl); +} + +vuint16m2_t test_vandn_vx_u16m2_tum(vbool8_t vm, vuint16m2_t vd, + vuint16m2_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vandn_tum(vm, vd, vs2, rs1, vl); +} + +vuint16m4_t test_vandn_vv_u16m4_tum(vbool4_t vm, vuint16m4_t vd, + vuint16m4_t vs2, vuint16m4_t vs1, + size_t vl) { + return __riscv_vandn_tum(vm, vd, vs2, vs1, vl); +} + +vuint16m4_t test_vandn_vx_u16m4_tum(vbool4_t vm, vuint16m4_t vd, + vuint16m4_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vandn_tum(vm, vd, vs2, rs1, vl); +} + +vuint16m8_t test_vandn_vv_u16m8_tum(vbool2_t vm, vuint16m8_t vd, + vuint16m8_t vs2, vuint16m8_t vs1, + size_t vl) { + return __riscv_vandn_tum(vm, vd, vs2, vs1, vl); +} + +vuint16m8_t test_vandn_vx_u16m8_tum(vbool2_t vm, vuint16m8_t vd, + vuint16m8_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vandn_tum(vm, vd, vs2, rs1, vl); +} + +vuint32mf2_t test_vandn_vv_u32mf2_tum(vbool64_t vm, vuint32mf2_t vd, + vuint32mf2_t vs2, vuint32mf2_t vs1, + size_t vl) { + return __riscv_vandn_tum(vm, vd, vs2, vs1, vl); +} + +vuint32mf2_t test_vandn_vx_u32mf2_tum(vbool64_t vm, vuint32mf2_t vd, + vuint32mf2_t vs2, uint32_t rs1, + size_t vl) { + return __riscv_vandn_tum(vm, vd, vs2, rs1, vl); +} + +vuint32m1_t test_vandn_vv_u32m1_tum(vbool32_t vm, vuint32m1_t vd, + vuint32m1_t vs2, vuint32m1_t vs1, + size_t vl) { + return __riscv_vandn_tum(vm, vd, vs2, vs1, vl); +} + +vuint32m1_t test_vandn_vx_u32m1_tum(vbool32_t vm, vuint32m1_t vd, + 
vuint32m1_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vandn_tum(vm, vd, vs2, rs1, vl); +} + +vuint32m2_t test_vandn_vv_u32m2_tum(vbool16_t vm, vuint32m2_t vd, + vuint32m2_t vs2, vuint32m2_t vs1, + size_t vl) { + return __riscv_vandn_tum(vm, vd, vs2, vs1, vl); +} + +vuint32m2_t test_vandn_vx_u32m2_tum(vbool16_t vm, vuint32m2_t vd, + vuint32m2_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vandn_tum(vm, vd, vs2, rs1, vl); +} + +vuint32m4_t test_vandn_vv_u32m4_tum(vbool8_t vm, vuint32m4_t vd, + vuint32m4_t vs2, vuint32m4_t vs1, + size_t vl) { + return __riscv_vandn_tum(vm, vd, vs2, vs1, vl); +} + +vuint32m4_t test_vandn_vx_u32m4_tum(vbool8_t vm, vuint32m4_t vd, + vuint32m4_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vandn_tum(vm, vd, vs2, rs1, vl); +} + +vuint32m8_t test_vandn_vv_u32m8_tum(vbool4_t vm, vuint32m8_t vd, + vuint32m8_t vs2, vuint32m8_t vs1, + size_t vl) { + return __riscv_vandn_tum(vm, vd, vs2, vs1, vl); +} + +vuint32m8_t test_vandn_vx_u32m8_tum(vbool4_t vm, vuint32m8_t vd, + vuint32m8_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vandn_tum(vm, vd, vs2, rs1, vl); +} + +vuint64m1_t test_vandn_vv_u64m1_tum(vbool64_t vm, vuint64m1_t vd, + vuint64m1_t vs2, vuint64m1_t vs1, + size_t vl) { + return __riscv_vandn_tum(vm, vd, vs2, vs1, vl); +} + +vuint64m1_t test_vandn_vx_u64m1_tum(vbool64_t vm, vuint64m1_t vd, + vuint64m1_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vandn_tum(vm, vd, vs2, rs1, vl); +} + +vuint64m2_t test_vandn_vv_u64m2_tum(vbool32_t vm, vuint64m2_t vd, + vuint64m2_t vs2, vuint64m2_t vs1, + size_t vl) { + return __riscv_vandn_tum(vm, vd, vs2, vs1, vl); +} + +vuint64m2_t test_vandn_vx_u64m2_tum(vbool32_t vm, vuint64m2_t vd, + vuint64m2_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vandn_tum(vm, vd, vs2, rs1, vl); +} + +vuint64m4_t test_vandn_vv_u64m4_tum(vbool16_t vm, vuint64m4_t vd, + vuint64m4_t vs2, vuint64m4_t vs1, + size_t vl) { + return __riscv_vandn_tum(vm, vd, vs2, vs1, vl); +} + +vuint64m4_t test_vandn_vx_u64m4_tum(vbool16_t vm, vuint64m4_t vd, + vuint64m4_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vandn_tum(vm, vd, vs2, rs1, vl); +} + +vuint64m8_t test_vandn_vv_u64m8_tum(vbool8_t vm, vuint64m8_t vd, + vuint64m8_t vs2, vuint64m8_t vs1, + size_t vl) { + return __riscv_vandn_tum(vm, vd, vs2, vs1, vl); +} + +vuint64m8_t test_vandn_vx_u64m8_tum(vbool8_t vm, vuint64m8_t vd, + vuint64m8_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vandn_tum(vm, vd, vs2, rs1, vl); +} + +vuint8mf8_t test_vandn_vv_u8mf8_tumu(vbool64_t vm, vuint8mf8_t vd, + vuint8mf8_t vs2, vuint8mf8_t vs1, + size_t vl) { + return __riscv_vandn_tumu(vm, vd, vs2, vs1, vl); +} + +vuint8mf8_t test_vandn_vx_u8mf8_tumu(vbool64_t vm, vuint8mf8_t vd, + vuint8mf8_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vandn_tumu(vm, vd, vs2, rs1, vl); +} + +vuint8mf4_t test_vandn_vv_u8mf4_tumu(vbool32_t vm, vuint8mf4_t vd, + vuint8mf4_t vs2, vuint8mf4_t vs1, + size_t vl) { + return __riscv_vandn_tumu(vm, vd, vs2, vs1, vl); +} + +vuint8mf4_t test_vandn_vx_u8mf4_tumu(vbool32_t vm, vuint8mf4_t vd, + vuint8mf4_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vandn_tumu(vm, vd, vs2, rs1, vl); +} + +vuint8mf2_t test_vandn_vv_u8mf2_tumu(vbool16_t vm, vuint8mf2_t vd, + vuint8mf2_t vs2, vuint8mf2_t vs1, + size_t vl) { + return __riscv_vandn_tumu(vm, vd, vs2, vs1, vl); +} + +vuint8mf2_t test_vandn_vx_u8mf2_tumu(vbool16_t vm, vuint8mf2_t vd, + vuint8mf2_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vandn_tumu(vm, vd, vs2, rs1, vl); +} + +vuint8m1_t 
test_vandn_vv_u8m1_tumu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, + vuint8m1_t vs1, size_t vl) { + return __riscv_vandn_tumu(vm, vd, vs2, vs1, vl); +} + +vuint8m1_t test_vandn_vx_u8m1_tumu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, + uint8_t rs1, size_t vl) { + return __riscv_vandn_tumu(vm, vd, vs2, rs1, vl); +} + +vuint8m2_t test_vandn_vv_u8m2_tumu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, + vuint8m2_t vs1, size_t vl) { + return __riscv_vandn_tumu(vm, vd, vs2, vs1, vl); +} + +vuint8m2_t test_vandn_vx_u8m2_tumu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, + uint8_t rs1, size_t vl) { + return __riscv_vandn_tumu(vm, vd, vs2, rs1, vl); +} + +vuint8m4_t test_vandn_vv_u8m4_tumu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, + vuint8m4_t vs1, size_t vl) { + return __riscv_vandn_tumu(vm, vd, vs2, vs1, vl); +} + +vuint8m4_t test_vandn_vx_u8m4_tumu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, + uint8_t rs1, size_t vl) { + return __riscv_vandn_tumu(vm, vd, vs2, rs1, vl); +} + +vuint8m8_t test_vandn_vv_u8m8_tumu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, + vuint8m8_t vs1, size_t vl) { + return __riscv_vandn_tumu(vm, vd, vs2, vs1, vl); +} + +vuint8m8_t test_vandn_vx_u8m8_tumu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, + uint8_t rs1, size_t vl) { + return __riscv_vandn_tumu(vm, vd, vs2, rs1, vl); +} + +vuint16mf4_t test_vandn_vv_u16mf4_tumu(vbool64_t vm, vuint16mf4_t vd, + vuint16mf4_t vs2, vuint16mf4_t vs1, + size_t vl) { + return __riscv_vandn_tumu(vm, vd, vs2, vs1, vl); +} + +vuint16mf4_t test_vandn_vx_u16mf4_tumu(vbool64_t vm, vuint16mf4_t vd, + vuint16mf4_t vs2, uint16_t rs1, + size_t vl) { + return __riscv_vandn_tumu(vm, vd, vs2, rs1, vl); +} + +vuint16mf2_t test_vandn_vv_u16mf2_tumu(vbool32_t vm, vuint16mf2_t vd, + vuint16mf2_t vs2, vuint16mf2_t vs1, + size_t vl) { + return __riscv_vandn_tumu(vm, vd, vs2, vs1, vl); +} + +vuint16mf2_t test_vandn_vx_u16mf2_tumu(vbool32_t vm, vuint16mf2_t vd, + vuint16mf2_t vs2, uint16_t rs1, + size_t vl) { + return __riscv_vandn_tumu(vm, vd, vs2, rs1, vl); +} + +vuint16m1_t test_vandn_vv_u16m1_tumu(vbool16_t vm, vuint16m1_t vd, + vuint16m1_t vs2, vuint16m1_t vs1, + size_t vl) { + return __riscv_vandn_tumu(vm, vd, vs2, vs1, vl); +} + +vuint16m1_t test_vandn_vx_u16m1_tumu(vbool16_t vm, vuint16m1_t vd, + vuint16m1_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vandn_tumu(vm, vd, vs2, rs1, vl); +} + +vuint16m2_t test_vandn_vv_u16m2_tumu(vbool8_t vm, vuint16m2_t vd, + vuint16m2_t vs2, vuint16m2_t vs1, + size_t vl) { + return __riscv_vandn_tumu(vm, vd, vs2, vs1, vl); +} + +vuint16m2_t test_vandn_vx_u16m2_tumu(vbool8_t vm, vuint16m2_t vd, + vuint16m2_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vandn_tumu(vm, vd, vs2, rs1, vl); +} + +vuint16m4_t test_vandn_vv_u16m4_tumu(vbool4_t vm, vuint16m4_t vd, + vuint16m4_t vs2, vuint16m4_t vs1, + size_t vl) { + return __riscv_vandn_tumu(vm, vd, vs2, vs1, vl); +} + +vuint16m4_t test_vandn_vx_u16m4_tumu(vbool4_t vm, vuint16m4_t vd, + vuint16m4_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vandn_tumu(vm, vd, vs2, rs1, vl); +} + +vuint16m8_t test_vandn_vv_u16m8_tumu(vbool2_t vm, vuint16m8_t vd, + vuint16m8_t vs2, vuint16m8_t vs1, + size_t vl) { + return __riscv_vandn_tumu(vm, vd, vs2, vs1, vl); +} + +vuint16m8_t test_vandn_vx_u16m8_tumu(vbool2_t vm, vuint16m8_t vd, + vuint16m8_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vandn_tumu(vm, vd, vs2, rs1, vl); +} + +vuint32mf2_t test_vandn_vv_u32mf2_tumu(vbool64_t vm, vuint32mf2_t vd, + vuint32mf2_t vs2, vuint32mf2_t vs1, + size_t vl) { + return __riscv_vandn_tumu(vm, 
vd, vs2, vs1, vl); +} + +vuint32mf2_t test_vandn_vx_u32mf2_tumu(vbool64_t vm, vuint32mf2_t vd, + vuint32mf2_t vs2, uint32_t rs1, + size_t vl) { + return __riscv_vandn_tumu(vm, vd, vs2, rs1, vl); +} + +vuint32m1_t test_vandn_vv_u32m1_tumu(vbool32_t vm, vuint32m1_t vd, + vuint32m1_t vs2, vuint32m1_t vs1, + size_t vl) { + return __riscv_vandn_tumu(vm, vd, vs2, vs1, vl); +} + +vuint32m1_t test_vandn_vx_u32m1_tumu(vbool32_t vm, vuint32m1_t vd, + vuint32m1_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vandn_tumu(vm, vd, vs2, rs1, vl); +} + +vuint32m2_t test_vandn_vv_u32m2_tumu(vbool16_t vm, vuint32m2_t vd, + vuint32m2_t vs2, vuint32m2_t vs1, + size_t vl) { + return __riscv_vandn_tumu(vm, vd, vs2, vs1, vl); +} + +vuint32m2_t test_vandn_vx_u32m2_tumu(vbool16_t vm, vuint32m2_t vd, + vuint32m2_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vandn_tumu(vm, vd, vs2, rs1, vl); +} + +vuint32m4_t test_vandn_vv_u32m4_tumu(vbool8_t vm, vuint32m4_t vd, + vuint32m4_t vs2, vuint32m4_t vs1, + size_t vl) { + return __riscv_vandn_tumu(vm, vd, vs2, vs1, vl); +} + +vuint32m4_t test_vandn_vx_u32m4_tumu(vbool8_t vm, vuint32m4_t vd, + vuint32m4_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vandn_tumu(vm, vd, vs2, rs1, vl); +} + +vuint32m8_t test_vandn_vv_u32m8_tumu(vbool4_t vm, vuint32m8_t vd, + vuint32m8_t vs2, vuint32m8_t vs1, + size_t vl) { + return __riscv_vandn_tumu(vm, vd, vs2, vs1, vl); +} + +vuint32m8_t test_vandn_vx_u32m8_tumu(vbool4_t vm, vuint32m8_t vd, + vuint32m8_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vandn_tumu(vm, vd, vs2, rs1, vl); +} + +vuint64m1_t test_vandn_vv_u64m1_tumu(vbool64_t vm, vuint64m1_t vd, + vuint64m1_t vs2, vuint64m1_t vs1, + size_t vl) { + return __riscv_vandn_tumu(vm, vd, vs2, vs1, vl); +} + +vuint64m1_t test_vandn_vx_u64m1_tumu(vbool64_t vm, vuint64m1_t vd, + vuint64m1_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vandn_tumu(vm, vd, vs2, rs1, vl); +} + +vuint64m2_t test_vandn_vv_u64m2_tumu(vbool32_t vm, vuint64m2_t vd, + vuint64m2_t vs2, vuint64m2_t vs1, + size_t vl) { + return __riscv_vandn_tumu(vm, vd, vs2, vs1, vl); +} + +vuint64m2_t test_vandn_vx_u64m2_tumu(vbool32_t vm, vuint64m2_t vd, + vuint64m2_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vandn_tumu(vm, vd, vs2, rs1, vl); +} + +vuint64m4_t test_vandn_vv_u64m4_tumu(vbool16_t vm, vuint64m4_t vd, + vuint64m4_t vs2, vuint64m4_t vs1, + size_t vl) { + return __riscv_vandn_tumu(vm, vd, vs2, vs1, vl); +} + +vuint64m4_t test_vandn_vx_u64m4_tumu(vbool16_t vm, vuint64m4_t vd, + vuint64m4_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vandn_tumu(vm, vd, vs2, rs1, vl); +} + +vuint64m8_t test_vandn_vv_u64m8_tumu(vbool8_t vm, vuint64m8_t vd, + vuint64m8_t vs2, vuint64m8_t vs1, + size_t vl) { + return __riscv_vandn_tumu(vm, vd, vs2, vs1, vl); +} + +vuint64m8_t test_vandn_vx_u64m8_tumu(vbool8_t vm, vuint64m8_t vd, + vuint64m8_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vandn_tumu(vm, vd, vs2, rs1, vl); +} + +vuint8mf8_t test_vandn_vv_u8mf8_mu(vbool64_t vm, vuint8mf8_t vd, + vuint8mf8_t vs2, vuint8mf8_t vs1, + size_t vl) { + return __riscv_vandn_mu(vm, vd, vs2, vs1, vl); +} + +vuint8mf8_t test_vandn_vx_u8mf8_mu(vbool64_t vm, vuint8mf8_t vd, + vuint8mf8_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vandn_mu(vm, vd, vs2, rs1, vl); +} + +vuint8mf4_t test_vandn_vv_u8mf4_mu(vbool32_t vm, vuint8mf4_t vd, + vuint8mf4_t vs2, vuint8mf4_t vs1, + size_t vl) { + return __riscv_vandn_mu(vm, vd, vs2, vs1, vl); +} + +vuint8mf4_t test_vandn_vx_u8mf4_mu(vbool32_t vm, vuint8mf4_t vd, + vuint8mf4_t vs2, uint8_t 
rs1, size_t vl) { + return __riscv_vandn_mu(vm, vd, vs2, rs1, vl); +} + +vuint8mf2_t test_vandn_vv_u8mf2_mu(vbool16_t vm, vuint8mf2_t vd, + vuint8mf2_t vs2, vuint8mf2_t vs1, + size_t vl) { + return __riscv_vandn_mu(vm, vd, vs2, vs1, vl); +} + +vuint8mf2_t test_vandn_vx_u8mf2_mu(vbool16_t vm, vuint8mf2_t vd, + vuint8mf2_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vandn_mu(vm, vd, vs2, rs1, vl); +} + +vuint8m1_t test_vandn_vv_u8m1_mu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, + vuint8m1_t vs1, size_t vl) { + return __riscv_vandn_mu(vm, vd, vs2, vs1, vl); +} + +vuint8m1_t test_vandn_vx_u8m1_mu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, + uint8_t rs1, size_t vl) { + return __riscv_vandn_mu(vm, vd, vs2, rs1, vl); +} + +vuint8m2_t test_vandn_vv_u8m2_mu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, + vuint8m2_t vs1, size_t vl) { + return __riscv_vandn_mu(vm, vd, vs2, vs1, vl); +} + +vuint8m2_t test_vandn_vx_u8m2_mu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, + uint8_t rs1, size_t vl) { + return __riscv_vandn_mu(vm, vd, vs2, rs1, vl); +} + +vuint8m4_t test_vandn_vv_u8m4_mu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, + vuint8m4_t vs1, size_t vl) { + return __riscv_vandn_mu(vm, vd, vs2, vs1, vl); +} + +vuint8m4_t test_vandn_vx_u8m4_mu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, + uint8_t rs1, size_t vl) { + return __riscv_vandn_mu(vm, vd, vs2, rs1, vl); +} + +vuint8m8_t test_vandn_vv_u8m8_mu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, + vuint8m8_t vs1, size_t vl) { + return __riscv_vandn_mu(vm, vd, vs2, vs1, vl); +} + +vuint8m8_t test_vandn_vx_u8m8_mu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, + uint8_t rs1, size_t vl) { + return __riscv_vandn_mu(vm, vd, vs2, rs1, vl); +} + +vuint16mf4_t test_vandn_vv_u16mf4_mu(vbool64_t vm, vuint16mf4_t vd, + vuint16mf4_t vs2, vuint16mf4_t vs1, + size_t vl) { + return __riscv_vandn_mu(vm, vd, vs2, vs1, vl); +} + +vuint16mf4_t test_vandn_vx_u16mf4_mu(vbool64_t vm, vuint16mf4_t vd, + vuint16mf4_t vs2, uint16_t rs1, + size_t vl) { + return __riscv_vandn_mu(vm, vd, vs2, rs1, vl); +} + +vuint16mf2_t test_vandn_vv_u16mf2_mu(vbool32_t vm, vuint16mf2_t vd, + vuint16mf2_t vs2, vuint16mf2_t vs1, + size_t vl) { + return __riscv_vandn_mu(vm, vd, vs2, vs1, vl); +} + +vuint16mf2_t test_vandn_vx_u16mf2_mu(vbool32_t vm, vuint16mf2_t vd, + vuint16mf2_t vs2, uint16_t rs1, + size_t vl) { + return __riscv_vandn_mu(vm, vd, vs2, rs1, vl); +} + +vuint16m1_t test_vandn_vv_u16m1_mu(vbool16_t vm, vuint16m1_t vd, + vuint16m1_t vs2, vuint16m1_t vs1, + size_t vl) { + return __riscv_vandn_mu(vm, vd, vs2, vs1, vl); +} + +vuint16m1_t test_vandn_vx_u16m1_mu(vbool16_t vm, vuint16m1_t vd, + vuint16m1_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vandn_mu(vm, vd, vs2, rs1, vl); +} + +vuint16m2_t test_vandn_vv_u16m2_mu(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, + vuint16m2_t vs1, size_t vl) { + return __riscv_vandn_mu(vm, vd, vs2, vs1, vl); +} + +vuint16m2_t test_vandn_vx_u16m2_mu(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, + uint16_t rs1, size_t vl) { + return __riscv_vandn_mu(vm, vd, vs2, rs1, vl); +} + +vuint16m4_t test_vandn_vv_u16m4_mu(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, + vuint16m4_t vs1, size_t vl) { + return __riscv_vandn_mu(vm, vd, vs2, vs1, vl); +} + +vuint16m4_t test_vandn_vx_u16m4_mu(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, + uint16_t rs1, size_t vl) { + return __riscv_vandn_mu(vm, vd, vs2, rs1, vl); +} + +vuint16m8_t test_vandn_vv_u16m8_mu(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, + vuint16m8_t vs1, size_t vl) { + return __riscv_vandn_mu(vm, vd, vs2, 
vs1, vl); +} + +vuint16m8_t test_vandn_vx_u16m8_mu(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, + uint16_t rs1, size_t vl) { + return __riscv_vandn_mu(vm, vd, vs2, rs1, vl); +} + +vuint32mf2_t test_vandn_vv_u32mf2_mu(vbool64_t vm, vuint32mf2_t vd, + vuint32mf2_t vs2, vuint32mf2_t vs1, + size_t vl) { + return __riscv_vandn_mu(vm, vd, vs2, vs1, vl); +} + +vuint32mf2_t test_vandn_vx_u32mf2_mu(vbool64_t vm, vuint32mf2_t vd, + vuint32mf2_t vs2, uint32_t rs1, + size_t vl) { + return __riscv_vandn_mu(vm, vd, vs2, rs1, vl); +} + +vuint32m1_t test_vandn_vv_u32m1_mu(vbool32_t vm, vuint32m1_t vd, + vuint32m1_t vs2, vuint32m1_t vs1, + size_t vl) { + return __riscv_vandn_mu(vm, vd, vs2, vs1, vl); +} + +vuint32m1_t test_vandn_vx_u32m1_mu(vbool32_t vm, vuint32m1_t vd, + vuint32m1_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vandn_mu(vm, vd, vs2, rs1, vl); +} + +vuint32m2_t test_vandn_vv_u32m2_mu(vbool16_t vm, vuint32m2_t vd, + vuint32m2_t vs2, vuint32m2_t vs1, + size_t vl) { + return __riscv_vandn_mu(vm, vd, vs2, vs1, vl); +} + +vuint32m2_t test_vandn_vx_u32m2_mu(vbool16_t vm, vuint32m2_t vd, + vuint32m2_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vandn_mu(vm, vd, vs2, rs1, vl); +} + +vuint32m4_t test_vandn_vv_u32m4_mu(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, + vuint32m4_t vs1, size_t vl) { + return __riscv_vandn_mu(vm, vd, vs2, vs1, vl); +} + +vuint32m4_t test_vandn_vx_u32m4_mu(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, + uint32_t rs1, size_t vl) { + return __riscv_vandn_mu(vm, vd, vs2, rs1, vl); +} + +vuint32m8_t test_vandn_vv_u32m8_mu(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, + vuint32m8_t vs1, size_t vl) { + return __riscv_vandn_mu(vm, vd, vs2, vs1, vl); +} + +vuint32m8_t test_vandn_vx_u32m8_mu(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, + uint32_t rs1, size_t vl) { + return __riscv_vandn_mu(vm, vd, vs2, rs1, vl); +} + +vuint64m1_t test_vandn_vv_u64m1_mu(vbool64_t vm, vuint64m1_t vd, + vuint64m1_t vs2, vuint64m1_t vs1, + size_t vl) { + return __riscv_vandn_mu(vm, vd, vs2, vs1, vl); +} + +vuint64m1_t test_vandn_vx_u64m1_mu(vbool64_t vm, vuint64m1_t vd, + vuint64m1_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vandn_mu(vm, vd, vs2, rs1, vl); +} + +vuint64m2_t test_vandn_vv_u64m2_mu(vbool32_t vm, vuint64m2_t vd, + vuint64m2_t vs2, vuint64m2_t vs1, + size_t vl) { + return __riscv_vandn_mu(vm, vd, vs2, vs1, vl); +} + +vuint64m2_t test_vandn_vx_u64m2_mu(vbool32_t vm, vuint64m2_t vd, + vuint64m2_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vandn_mu(vm, vd, vs2, rs1, vl); +} + +vuint64m4_t test_vandn_vv_u64m4_mu(vbool16_t vm, vuint64m4_t vd, + vuint64m4_t vs2, vuint64m4_t vs1, + size_t vl) { + return __riscv_vandn_mu(vm, vd, vs2, vs1, vl); +} + +vuint64m4_t test_vandn_vx_u64m4_mu(vbool16_t vm, vuint64m4_t vd, + vuint64m4_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vandn_mu(vm, vd, vs2, rs1, vl); +} + +vuint64m8_t test_vandn_vv_u64m8_mu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, + vuint64m8_t vs1, size_t vl) { + return __riscv_vandn_mu(vm, vd, vs2, vs1, vl); +} + +vuint64m8_t test_vandn_vx_u64m8_mu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, + uint64_t rs1, size_t vl) { + return __riscv_vandn_mu(vm, vd, vs2, rs1, vl); +} diff --git a/auto-generated/vector-crypto/policy_funcs/llvm-overloaded-tests/vbrev.c b/auto-generated/vector-crypto/policy_funcs/llvm-overloaded-tests/vbrev.c new file mode 100644 index 000000000..a9c542556 --- /dev/null +++ b/auto-generated/vector-crypto/policy_funcs/llvm-overloaded-tests/vbrev.c @@ -0,0 +1,434 @@ +// REQUIRES: 
riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zvl512b \ +// RUN: -target-feature +zvbb \ +// RUN: -target-feature +zvbc \ +// RUN: -target-feature +zvkg \ +// RUN: -target-feature +zvkned \ +// RUN: -target-feature +zvknhb \ +// RUN: -target-feature +zvksed \ +// RUN: -target-feature +zvksh -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include + +vuint8mf8_t test_vbrev_v_u8mf8_tu(vuint8mf8_t vd, vuint8mf8_t vs2, size_t vl) { + return __riscv_vbrev_tu(vd, vs2, vl); +} + +vuint8mf4_t test_vbrev_v_u8mf4_tu(vuint8mf4_t vd, vuint8mf4_t vs2, size_t vl) { + return __riscv_vbrev_tu(vd, vs2, vl); +} + +vuint8mf2_t test_vbrev_v_u8mf2_tu(vuint8mf2_t vd, vuint8mf2_t vs2, size_t vl) { + return __riscv_vbrev_tu(vd, vs2, vl); +} + +vuint8m1_t test_vbrev_v_u8m1_tu(vuint8m1_t vd, vuint8m1_t vs2, size_t vl) { + return __riscv_vbrev_tu(vd, vs2, vl); +} + +vuint8m2_t test_vbrev_v_u8m2_tu(vuint8m2_t vd, vuint8m2_t vs2, size_t vl) { + return __riscv_vbrev_tu(vd, vs2, vl); +} + +vuint8m4_t test_vbrev_v_u8m4_tu(vuint8m4_t vd, vuint8m4_t vs2, size_t vl) { + return __riscv_vbrev_tu(vd, vs2, vl); +} + +vuint8m8_t test_vbrev_v_u8m8_tu(vuint8m8_t vd, vuint8m8_t vs2, size_t vl) { + return __riscv_vbrev_tu(vd, vs2, vl); +} + +vuint16mf4_t test_vbrev_v_u16mf4_tu(vuint16mf4_t vd, vuint16mf4_t vs2, + size_t vl) { + return __riscv_vbrev_tu(vd, vs2, vl); +} + +vuint16mf2_t test_vbrev_v_u16mf2_tu(vuint16mf2_t vd, vuint16mf2_t vs2, + size_t vl) { + return __riscv_vbrev_tu(vd, vs2, vl); +} + +vuint16m1_t test_vbrev_v_u16m1_tu(vuint16m1_t vd, vuint16m1_t vs2, size_t vl) { + return __riscv_vbrev_tu(vd, vs2, vl); +} + +vuint16m2_t test_vbrev_v_u16m2_tu(vuint16m2_t vd, vuint16m2_t vs2, size_t vl) { + return __riscv_vbrev_tu(vd, vs2, vl); +} + +vuint16m4_t test_vbrev_v_u16m4_tu(vuint16m4_t vd, vuint16m4_t vs2, size_t vl) { + return __riscv_vbrev_tu(vd, vs2, vl); +} + +vuint16m8_t test_vbrev_v_u16m8_tu(vuint16m8_t vd, vuint16m8_t vs2, size_t vl) { + return __riscv_vbrev_tu(vd, vs2, vl); +} + +vuint32mf2_t test_vbrev_v_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, + size_t vl) { + return __riscv_vbrev_tu(vd, vs2, vl); +} + +vuint32m1_t test_vbrev_v_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vbrev_tu(vd, vs2, vl); +} + +vuint32m2_t test_vbrev_v_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vbrev_tu(vd, vs2, vl); +} + +vuint32m4_t test_vbrev_v_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vbrev_tu(vd, vs2, vl); +} + +vuint32m8_t test_vbrev_v_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { + return __riscv_vbrev_tu(vd, vs2, vl); +} + +vuint64m1_t test_vbrev_v_u64m1_tu(vuint64m1_t vd, vuint64m1_t vs2, size_t vl) { + return __riscv_vbrev_tu(vd, vs2, vl); +} + +vuint64m2_t test_vbrev_v_u64m2_tu(vuint64m2_t vd, vuint64m2_t vs2, size_t vl) { + return __riscv_vbrev_tu(vd, vs2, vl); +} + +vuint64m4_t test_vbrev_v_u64m4_tu(vuint64m4_t vd, vuint64m4_t vs2, size_t vl) { + return __riscv_vbrev_tu(vd, vs2, vl); +} + +vuint64m8_t test_vbrev_v_u64m8_tu(vuint64m8_t vd, vuint64m8_t vs2, size_t vl) { + return __riscv_vbrev_tu(vd, vs2, vl); +} + +vuint8mf8_t test_vbrev_v_u8mf8_tum(vbool64_t vm, vuint8mf8_t vd, + vuint8mf8_t vs2, size_t vl) { + return __riscv_vbrev_tum(vm, vd, vs2, vl); +} + +vuint8mf4_t test_vbrev_v_u8mf4_tum(vbool32_t vm, vuint8mf4_t vd, + vuint8mf4_t vs2, size_t vl) { + return __riscv_vbrev_tum(vm, vd, vs2, vl); +} + 
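// vbrev reverses the bit order within each element; the _tum overloads above
// additionally take a mask and leave both masked-off and tail elements equal
// to vd. A short, hedged sketch of the plain unmasked form, not part of the
// generated tests; the helper name is illustrative and only standard
// riscv_vector.h intrinsics are assumed:

#include <riscv_vector.h>
#include <stddef.h>
#include <stdint.h>

/* Reverse the bit order inside every 32-bit word of src into dst. */
static void bit_reverse_words(uint32_t *dst, const uint32_t *src, size_t n) {
  for (size_t i = 0; i < n;) {
    size_t vl = __riscv_vsetvl_e32m1(n - i);
    vuint32m1_t v = __riscv_vle32_v_u32m1(src + i, vl);
    __riscv_vse32_v_u32m1(dst + i, __riscv_vbrev_v_u32m1(v, vl), vl);
    i += vl;
  }
}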
+vuint8mf2_t test_vbrev_v_u8mf2_tum(vbool16_t vm, vuint8mf2_t vd, + vuint8mf2_t vs2, size_t vl) { + return __riscv_vbrev_tum(vm, vd, vs2, vl); +} + +vuint8m1_t test_vbrev_v_u8m1_tum(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, + size_t vl) { + return __riscv_vbrev_tum(vm, vd, vs2, vl); +} + +vuint8m2_t test_vbrev_v_u8m2_tum(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, + size_t vl) { + return __riscv_vbrev_tum(vm, vd, vs2, vl); +} + +vuint8m4_t test_vbrev_v_u8m4_tum(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, + size_t vl) { + return __riscv_vbrev_tum(vm, vd, vs2, vl); +} + +vuint8m8_t test_vbrev_v_u8m8_tum(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, + size_t vl) { + return __riscv_vbrev_tum(vm, vd, vs2, vl); +} + +vuint16mf4_t test_vbrev_v_u16mf4_tum(vbool64_t vm, vuint16mf4_t vd, + vuint16mf4_t vs2, size_t vl) { + return __riscv_vbrev_tum(vm, vd, vs2, vl); +} + +vuint16mf2_t test_vbrev_v_u16mf2_tum(vbool32_t vm, vuint16mf2_t vd, + vuint16mf2_t vs2, size_t vl) { + return __riscv_vbrev_tum(vm, vd, vs2, vl); +} + +vuint16m1_t test_vbrev_v_u16m1_tum(vbool16_t vm, vuint16m1_t vd, + vuint16m1_t vs2, size_t vl) { + return __riscv_vbrev_tum(vm, vd, vs2, vl); +} + +vuint16m2_t test_vbrev_v_u16m2_tum(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, + size_t vl) { + return __riscv_vbrev_tum(vm, vd, vs2, vl); +} + +vuint16m4_t test_vbrev_v_u16m4_tum(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, + size_t vl) { + return __riscv_vbrev_tum(vm, vd, vs2, vl); +} + +vuint16m8_t test_vbrev_v_u16m8_tum(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, + size_t vl) { + return __riscv_vbrev_tum(vm, vd, vs2, vl); +} + +vuint32mf2_t test_vbrev_v_u32mf2_tum(vbool64_t vm, vuint32mf2_t vd, + vuint32mf2_t vs2, size_t vl) { + return __riscv_vbrev_tum(vm, vd, vs2, vl); +} + +vuint32m1_t test_vbrev_v_u32m1_tum(vbool32_t vm, vuint32m1_t vd, + vuint32m1_t vs2, size_t vl) { + return __riscv_vbrev_tum(vm, vd, vs2, vl); +} + +vuint32m2_t test_vbrev_v_u32m2_tum(vbool16_t vm, vuint32m2_t vd, + vuint32m2_t vs2, size_t vl) { + return __riscv_vbrev_tum(vm, vd, vs2, vl); +} + +vuint32m4_t test_vbrev_v_u32m4_tum(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, + size_t vl) { + return __riscv_vbrev_tum(vm, vd, vs2, vl); +} + +vuint32m8_t test_vbrev_v_u32m8_tum(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, + size_t vl) { + return __riscv_vbrev_tum(vm, vd, vs2, vl); +} + +vuint64m1_t test_vbrev_v_u64m1_tum(vbool64_t vm, vuint64m1_t vd, + vuint64m1_t vs2, size_t vl) { + return __riscv_vbrev_tum(vm, vd, vs2, vl); +} + +vuint64m2_t test_vbrev_v_u64m2_tum(vbool32_t vm, vuint64m2_t vd, + vuint64m2_t vs2, size_t vl) { + return __riscv_vbrev_tum(vm, vd, vs2, vl); +} + +vuint64m4_t test_vbrev_v_u64m4_tum(vbool16_t vm, vuint64m4_t vd, + vuint64m4_t vs2, size_t vl) { + return __riscv_vbrev_tum(vm, vd, vs2, vl); +} + +vuint64m8_t test_vbrev_v_u64m8_tum(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, + size_t vl) { + return __riscv_vbrev_tum(vm, vd, vs2, vl); +} + +vuint8mf8_t test_vbrev_v_u8mf8_tumu(vbool64_t vm, vuint8mf8_t vd, + vuint8mf8_t vs2, size_t vl) { + return __riscv_vbrev_tumu(vm, vd, vs2, vl); +} + +vuint8mf4_t test_vbrev_v_u8mf4_tumu(vbool32_t vm, vuint8mf4_t vd, + vuint8mf4_t vs2, size_t vl) { + return __riscv_vbrev_tumu(vm, vd, vs2, vl); +} + +vuint8mf2_t test_vbrev_v_u8mf2_tumu(vbool16_t vm, vuint8mf2_t vd, + vuint8mf2_t vs2, size_t vl) { + return __riscv_vbrev_tumu(vm, vd, vs2, vl); +} + +vuint8m1_t test_vbrev_v_u8m1_tumu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, + size_t vl) { + return __riscv_vbrev_tumu(vm, vd, vs2, vl); +} + +vuint8m2_t 
test_vbrev_v_u8m2_tumu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, + size_t vl) { + return __riscv_vbrev_tumu(vm, vd, vs2, vl); +} + +vuint8m4_t test_vbrev_v_u8m4_tumu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, + size_t vl) { + return __riscv_vbrev_tumu(vm, vd, vs2, vl); +} + +vuint8m8_t test_vbrev_v_u8m8_tumu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, + size_t vl) { + return __riscv_vbrev_tumu(vm, vd, vs2, vl); +} + +vuint16mf4_t test_vbrev_v_u16mf4_tumu(vbool64_t vm, vuint16mf4_t vd, + vuint16mf4_t vs2, size_t vl) { + return __riscv_vbrev_tumu(vm, vd, vs2, vl); +} + +vuint16mf2_t test_vbrev_v_u16mf2_tumu(vbool32_t vm, vuint16mf2_t vd, + vuint16mf2_t vs2, size_t vl) { + return __riscv_vbrev_tumu(vm, vd, vs2, vl); +} + +vuint16m1_t test_vbrev_v_u16m1_tumu(vbool16_t vm, vuint16m1_t vd, + vuint16m1_t vs2, size_t vl) { + return __riscv_vbrev_tumu(vm, vd, vs2, vl); +} + +vuint16m2_t test_vbrev_v_u16m2_tumu(vbool8_t vm, vuint16m2_t vd, + vuint16m2_t vs2, size_t vl) { + return __riscv_vbrev_tumu(vm, vd, vs2, vl); +} + +vuint16m4_t test_vbrev_v_u16m4_tumu(vbool4_t vm, vuint16m4_t vd, + vuint16m4_t vs2, size_t vl) { + return __riscv_vbrev_tumu(vm, vd, vs2, vl); +} + +vuint16m8_t test_vbrev_v_u16m8_tumu(vbool2_t vm, vuint16m8_t vd, + vuint16m8_t vs2, size_t vl) { + return __riscv_vbrev_tumu(vm, vd, vs2, vl); +} + +vuint32mf2_t test_vbrev_v_u32mf2_tumu(vbool64_t vm, vuint32mf2_t vd, + vuint32mf2_t vs2, size_t vl) { + return __riscv_vbrev_tumu(vm, vd, vs2, vl); +} + +vuint32m1_t test_vbrev_v_u32m1_tumu(vbool32_t vm, vuint32m1_t vd, + vuint32m1_t vs2, size_t vl) { + return __riscv_vbrev_tumu(vm, vd, vs2, vl); +} + +vuint32m2_t test_vbrev_v_u32m2_tumu(vbool16_t vm, vuint32m2_t vd, + vuint32m2_t vs2, size_t vl) { + return __riscv_vbrev_tumu(vm, vd, vs2, vl); +} + +vuint32m4_t test_vbrev_v_u32m4_tumu(vbool8_t vm, vuint32m4_t vd, + vuint32m4_t vs2, size_t vl) { + return __riscv_vbrev_tumu(vm, vd, vs2, vl); +} + +vuint32m8_t test_vbrev_v_u32m8_tumu(vbool4_t vm, vuint32m8_t vd, + vuint32m8_t vs2, size_t vl) { + return __riscv_vbrev_tumu(vm, vd, vs2, vl); +} + +vuint64m1_t test_vbrev_v_u64m1_tumu(vbool64_t vm, vuint64m1_t vd, + vuint64m1_t vs2, size_t vl) { + return __riscv_vbrev_tumu(vm, vd, vs2, vl); +} + +vuint64m2_t test_vbrev_v_u64m2_tumu(vbool32_t vm, vuint64m2_t vd, + vuint64m2_t vs2, size_t vl) { + return __riscv_vbrev_tumu(vm, vd, vs2, vl); +} + +vuint64m4_t test_vbrev_v_u64m4_tumu(vbool16_t vm, vuint64m4_t vd, + vuint64m4_t vs2, size_t vl) { + return __riscv_vbrev_tumu(vm, vd, vs2, vl); +} + +vuint64m8_t test_vbrev_v_u64m8_tumu(vbool8_t vm, vuint64m8_t vd, + vuint64m8_t vs2, size_t vl) { + return __riscv_vbrev_tumu(vm, vd, vs2, vl); +} + +vuint8mf8_t test_vbrev_v_u8mf8_mu(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, + size_t vl) { + return __riscv_vbrev_mu(vm, vd, vs2, vl); +} + +vuint8mf4_t test_vbrev_v_u8mf4_mu(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, + size_t vl) { + return __riscv_vbrev_mu(vm, vd, vs2, vl); +} + +vuint8mf2_t test_vbrev_v_u8mf2_mu(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, + size_t vl) { + return __riscv_vbrev_mu(vm, vd, vs2, vl); +} + +vuint8m1_t test_vbrev_v_u8m1_mu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, + size_t vl) { + return __riscv_vbrev_mu(vm, vd, vs2, vl); +} + +vuint8m2_t test_vbrev_v_u8m2_mu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, + size_t vl) { + return __riscv_vbrev_mu(vm, vd, vs2, vl); +} + +vuint8m4_t test_vbrev_v_u8m4_mu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, + size_t vl) { + return __riscv_vbrev_mu(vm, vd, vs2, vl); +} + +vuint8m8_t 
test_vbrev_v_u8m8_mu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, + size_t vl) { + return __riscv_vbrev_mu(vm, vd, vs2, vl); +} + +vuint16mf4_t test_vbrev_v_u16mf4_mu(vbool64_t vm, vuint16mf4_t vd, + vuint16mf4_t vs2, size_t vl) { + return __riscv_vbrev_mu(vm, vd, vs2, vl); +} + +vuint16mf2_t test_vbrev_v_u16mf2_mu(vbool32_t vm, vuint16mf2_t vd, + vuint16mf2_t vs2, size_t vl) { + return __riscv_vbrev_mu(vm, vd, vs2, vl); +} + +vuint16m1_t test_vbrev_v_u16m1_mu(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, + size_t vl) { + return __riscv_vbrev_mu(vm, vd, vs2, vl); +} + +vuint16m2_t test_vbrev_v_u16m2_mu(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, + size_t vl) { + return __riscv_vbrev_mu(vm, vd, vs2, vl); +} + +vuint16m4_t test_vbrev_v_u16m4_mu(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, + size_t vl) { + return __riscv_vbrev_mu(vm, vd, vs2, vl); +} + +vuint16m8_t test_vbrev_v_u16m8_mu(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, + size_t vl) { + return __riscv_vbrev_mu(vm, vd, vs2, vl); +} + +vuint32mf2_t test_vbrev_v_u32mf2_mu(vbool64_t vm, vuint32mf2_t vd, + vuint32mf2_t vs2, size_t vl) { + return __riscv_vbrev_mu(vm, vd, vs2, vl); +} + +vuint32m1_t test_vbrev_v_u32m1_mu(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, + size_t vl) { + return __riscv_vbrev_mu(vm, vd, vs2, vl); +} + +vuint32m2_t test_vbrev_v_u32m2_mu(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, + size_t vl) { + return __riscv_vbrev_mu(vm, vd, vs2, vl); +} + +vuint32m4_t test_vbrev_v_u32m4_mu(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, + size_t vl) { + return __riscv_vbrev_mu(vm, vd, vs2, vl); +} + +vuint32m8_t test_vbrev_v_u32m8_mu(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, + size_t vl) { + return __riscv_vbrev_mu(vm, vd, vs2, vl); +} + +vuint64m1_t test_vbrev_v_u64m1_mu(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, + size_t vl) { + return __riscv_vbrev_mu(vm, vd, vs2, vl); +} + +vuint64m2_t test_vbrev_v_u64m2_mu(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, + size_t vl) { + return __riscv_vbrev_mu(vm, vd, vs2, vl); +} + +vuint64m4_t test_vbrev_v_u64m4_mu(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, + size_t vl) { + return __riscv_vbrev_mu(vm, vd, vs2, vl); +} + +vuint64m8_t test_vbrev_v_u64m8_mu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, + size_t vl) { + return __riscv_vbrev_mu(vm, vd, vs2, vl); +} diff --git a/auto-generated/vector-crypto/policy_funcs/llvm-overloaded-tests/vbrev8.c b/auto-generated/vector-crypto/policy_funcs/llvm-overloaded-tests/vbrev8.c new file mode 100644 index 000000000..a986b5ece --- /dev/null +++ b/auto-generated/vector-crypto/policy_funcs/llvm-overloaded-tests/vbrev8.c @@ -0,0 +1,434 @@ +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zvl512b \ +// RUN: -target-feature +zvbb \ +// RUN: -target-feature +zvbc \ +// RUN: -target-feature +zvkg \ +// RUN: -target-feature +zvkned \ +// RUN: -target-feature +zvknhb \ +// RUN: -target-feature +zvksed \ +// RUN: -target-feature +zvksh -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include + +vuint8mf8_t test_vbrev8_v_u8mf8_tu(vuint8mf8_t vd, vuint8mf8_t vs2, size_t vl) { + return __riscv_vbrev8_tu(vd, vs2, vl); +} + +vuint8mf4_t test_vbrev8_v_u8mf4_tu(vuint8mf4_t vd, vuint8mf4_t vs2, size_t vl) { + return __riscv_vbrev8_tu(vd, vs2, vl); +} + +vuint8mf2_t test_vbrev8_v_u8mf2_tu(vuint8mf2_t vd, vuint8mf2_t vs2, size_t vl) { + return __riscv_vbrev8_tu(vd, vs2, vl); +} + +vuint8m1_t 
test_vbrev8_v_u8m1_tu(vuint8m1_t vd, vuint8m1_t vs2, size_t vl) { + return __riscv_vbrev8_tu(vd, vs2, vl); +} + +vuint8m2_t test_vbrev8_v_u8m2_tu(vuint8m2_t vd, vuint8m2_t vs2, size_t vl) { + return __riscv_vbrev8_tu(vd, vs2, vl); +} + +vuint8m4_t test_vbrev8_v_u8m4_tu(vuint8m4_t vd, vuint8m4_t vs2, size_t vl) { + return __riscv_vbrev8_tu(vd, vs2, vl); +} + +vuint8m8_t test_vbrev8_v_u8m8_tu(vuint8m8_t vd, vuint8m8_t vs2, size_t vl) { + return __riscv_vbrev8_tu(vd, vs2, vl); +} + +vuint16mf4_t test_vbrev8_v_u16mf4_tu(vuint16mf4_t vd, vuint16mf4_t vs2, + size_t vl) { + return __riscv_vbrev8_tu(vd, vs2, vl); +} + +vuint16mf2_t test_vbrev8_v_u16mf2_tu(vuint16mf2_t vd, vuint16mf2_t vs2, + size_t vl) { + return __riscv_vbrev8_tu(vd, vs2, vl); +} + +vuint16m1_t test_vbrev8_v_u16m1_tu(vuint16m1_t vd, vuint16m1_t vs2, size_t vl) { + return __riscv_vbrev8_tu(vd, vs2, vl); +} + +vuint16m2_t test_vbrev8_v_u16m2_tu(vuint16m2_t vd, vuint16m2_t vs2, size_t vl) { + return __riscv_vbrev8_tu(vd, vs2, vl); +} + +vuint16m4_t test_vbrev8_v_u16m4_tu(vuint16m4_t vd, vuint16m4_t vs2, size_t vl) { + return __riscv_vbrev8_tu(vd, vs2, vl); +} + +vuint16m8_t test_vbrev8_v_u16m8_tu(vuint16m8_t vd, vuint16m8_t vs2, size_t vl) { + return __riscv_vbrev8_tu(vd, vs2, vl); +} + +vuint32mf2_t test_vbrev8_v_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, + size_t vl) { + return __riscv_vbrev8_tu(vd, vs2, vl); +} + +vuint32m1_t test_vbrev8_v_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vbrev8_tu(vd, vs2, vl); +} + +vuint32m2_t test_vbrev8_v_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vbrev8_tu(vd, vs2, vl); +} + +vuint32m4_t test_vbrev8_v_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vbrev8_tu(vd, vs2, vl); +} + +vuint32m8_t test_vbrev8_v_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { + return __riscv_vbrev8_tu(vd, vs2, vl); +} + +vuint64m1_t test_vbrev8_v_u64m1_tu(vuint64m1_t vd, vuint64m1_t vs2, size_t vl) { + return __riscv_vbrev8_tu(vd, vs2, vl); +} + +vuint64m2_t test_vbrev8_v_u64m2_tu(vuint64m2_t vd, vuint64m2_t vs2, size_t vl) { + return __riscv_vbrev8_tu(vd, vs2, vl); +} + +vuint64m4_t test_vbrev8_v_u64m4_tu(vuint64m4_t vd, vuint64m4_t vs2, size_t vl) { + return __riscv_vbrev8_tu(vd, vs2, vl); +} + +vuint64m8_t test_vbrev8_v_u64m8_tu(vuint64m8_t vd, vuint64m8_t vs2, size_t vl) { + return __riscv_vbrev8_tu(vd, vs2, vl); +} + +vuint8mf8_t test_vbrev8_v_u8mf8_tum(vbool64_t vm, vuint8mf8_t vd, + vuint8mf8_t vs2, size_t vl) { + return __riscv_vbrev8_tum(vm, vd, vs2, vl); +} + +vuint8mf4_t test_vbrev8_v_u8mf4_tum(vbool32_t vm, vuint8mf4_t vd, + vuint8mf4_t vs2, size_t vl) { + return __riscv_vbrev8_tum(vm, vd, vs2, vl); +} + +vuint8mf2_t test_vbrev8_v_u8mf2_tum(vbool16_t vm, vuint8mf2_t vd, + vuint8mf2_t vs2, size_t vl) { + return __riscv_vbrev8_tum(vm, vd, vs2, vl); +} + +vuint8m1_t test_vbrev8_v_u8m1_tum(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, + size_t vl) { + return __riscv_vbrev8_tum(vm, vd, vs2, vl); +} + +vuint8m2_t test_vbrev8_v_u8m2_tum(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, + size_t vl) { + return __riscv_vbrev8_tum(vm, vd, vs2, vl); +} + +vuint8m4_t test_vbrev8_v_u8m4_tum(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, + size_t vl) { + return __riscv_vbrev8_tum(vm, vd, vs2, vl); +} + +vuint8m8_t test_vbrev8_v_u8m8_tum(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, + size_t vl) { + return __riscv_vbrev8_tum(vm, vd, vs2, vl); +} + +vuint16mf4_t test_vbrev8_v_u16mf4_tum(vbool64_t vm, vuint16mf4_t vd, + vuint16mf4_t vs2, size_t vl) { 
+ return __riscv_vbrev8_tum(vm, vd, vs2, vl); +} + +vuint16mf2_t test_vbrev8_v_u16mf2_tum(vbool32_t vm, vuint16mf2_t vd, + vuint16mf2_t vs2, size_t vl) { + return __riscv_vbrev8_tum(vm, vd, vs2, vl); +} + +vuint16m1_t test_vbrev8_v_u16m1_tum(vbool16_t vm, vuint16m1_t vd, + vuint16m1_t vs2, size_t vl) { + return __riscv_vbrev8_tum(vm, vd, vs2, vl); +} + +vuint16m2_t test_vbrev8_v_u16m2_tum(vbool8_t vm, vuint16m2_t vd, + vuint16m2_t vs2, size_t vl) { + return __riscv_vbrev8_tum(vm, vd, vs2, vl); +} + +vuint16m4_t test_vbrev8_v_u16m4_tum(vbool4_t vm, vuint16m4_t vd, + vuint16m4_t vs2, size_t vl) { + return __riscv_vbrev8_tum(vm, vd, vs2, vl); +} + +vuint16m8_t test_vbrev8_v_u16m8_tum(vbool2_t vm, vuint16m8_t vd, + vuint16m8_t vs2, size_t vl) { + return __riscv_vbrev8_tum(vm, vd, vs2, vl); +} + +vuint32mf2_t test_vbrev8_v_u32mf2_tum(vbool64_t vm, vuint32mf2_t vd, + vuint32mf2_t vs2, size_t vl) { + return __riscv_vbrev8_tum(vm, vd, vs2, vl); +} + +vuint32m1_t test_vbrev8_v_u32m1_tum(vbool32_t vm, vuint32m1_t vd, + vuint32m1_t vs2, size_t vl) { + return __riscv_vbrev8_tum(vm, vd, vs2, vl); +} + +vuint32m2_t test_vbrev8_v_u32m2_tum(vbool16_t vm, vuint32m2_t vd, + vuint32m2_t vs2, size_t vl) { + return __riscv_vbrev8_tum(vm, vd, vs2, vl); +} + +vuint32m4_t test_vbrev8_v_u32m4_tum(vbool8_t vm, vuint32m4_t vd, + vuint32m4_t vs2, size_t vl) { + return __riscv_vbrev8_tum(vm, vd, vs2, vl); +} + +vuint32m8_t test_vbrev8_v_u32m8_tum(vbool4_t vm, vuint32m8_t vd, + vuint32m8_t vs2, size_t vl) { + return __riscv_vbrev8_tum(vm, vd, vs2, vl); +} + +vuint64m1_t test_vbrev8_v_u64m1_tum(vbool64_t vm, vuint64m1_t vd, + vuint64m1_t vs2, size_t vl) { + return __riscv_vbrev8_tum(vm, vd, vs2, vl); +} + +vuint64m2_t test_vbrev8_v_u64m2_tum(vbool32_t vm, vuint64m2_t vd, + vuint64m2_t vs2, size_t vl) { + return __riscv_vbrev8_tum(vm, vd, vs2, vl); +} + +vuint64m4_t test_vbrev8_v_u64m4_tum(vbool16_t vm, vuint64m4_t vd, + vuint64m4_t vs2, size_t vl) { + return __riscv_vbrev8_tum(vm, vd, vs2, vl); +} + +vuint64m8_t test_vbrev8_v_u64m8_tum(vbool8_t vm, vuint64m8_t vd, + vuint64m8_t vs2, size_t vl) { + return __riscv_vbrev8_tum(vm, vd, vs2, vl); +} + +vuint8mf8_t test_vbrev8_v_u8mf8_tumu(vbool64_t vm, vuint8mf8_t vd, + vuint8mf8_t vs2, size_t vl) { + return __riscv_vbrev8_tumu(vm, vd, vs2, vl); +} + +vuint8mf4_t test_vbrev8_v_u8mf4_tumu(vbool32_t vm, vuint8mf4_t vd, + vuint8mf4_t vs2, size_t vl) { + return __riscv_vbrev8_tumu(vm, vd, vs2, vl); +} + +vuint8mf2_t test_vbrev8_v_u8mf2_tumu(vbool16_t vm, vuint8mf2_t vd, + vuint8mf2_t vs2, size_t vl) { + return __riscv_vbrev8_tumu(vm, vd, vs2, vl); +} + +vuint8m1_t test_vbrev8_v_u8m1_tumu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, + size_t vl) { + return __riscv_vbrev8_tumu(vm, vd, vs2, vl); +} + +vuint8m2_t test_vbrev8_v_u8m2_tumu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, + size_t vl) { + return __riscv_vbrev8_tumu(vm, vd, vs2, vl); +} + +vuint8m4_t test_vbrev8_v_u8m4_tumu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, + size_t vl) { + return __riscv_vbrev8_tumu(vm, vd, vs2, vl); +} + +vuint8m8_t test_vbrev8_v_u8m8_tumu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, + size_t vl) { + return __riscv_vbrev8_tumu(vm, vd, vs2, vl); +} + +vuint16mf4_t test_vbrev8_v_u16mf4_tumu(vbool64_t vm, vuint16mf4_t vd, + vuint16mf4_t vs2, size_t vl) { + return __riscv_vbrev8_tumu(vm, vd, vs2, vl); +} + +vuint16mf2_t test_vbrev8_v_u16mf2_tumu(vbool32_t vm, vuint16mf2_t vd, + vuint16mf2_t vs2, size_t vl) { + return __riscv_vbrev8_tumu(vm, vd, vs2, vl); +} + +vuint16m1_t 
test_vbrev8_v_u16m1_tumu(vbool16_t vm, vuint16m1_t vd, + vuint16m1_t vs2, size_t vl) { + return __riscv_vbrev8_tumu(vm, vd, vs2, vl); +} + +vuint16m2_t test_vbrev8_v_u16m2_tumu(vbool8_t vm, vuint16m2_t vd, + vuint16m2_t vs2, size_t vl) { + return __riscv_vbrev8_tumu(vm, vd, vs2, vl); +} + +vuint16m4_t test_vbrev8_v_u16m4_tumu(vbool4_t vm, vuint16m4_t vd, + vuint16m4_t vs2, size_t vl) { + return __riscv_vbrev8_tumu(vm, vd, vs2, vl); +} + +vuint16m8_t test_vbrev8_v_u16m8_tumu(vbool2_t vm, vuint16m8_t vd, + vuint16m8_t vs2, size_t vl) { + return __riscv_vbrev8_tumu(vm, vd, vs2, vl); +} + +vuint32mf2_t test_vbrev8_v_u32mf2_tumu(vbool64_t vm, vuint32mf2_t vd, + vuint32mf2_t vs2, size_t vl) { + return __riscv_vbrev8_tumu(vm, vd, vs2, vl); +} + +vuint32m1_t test_vbrev8_v_u32m1_tumu(vbool32_t vm, vuint32m1_t vd, + vuint32m1_t vs2, size_t vl) { + return __riscv_vbrev8_tumu(vm, vd, vs2, vl); +} + +vuint32m2_t test_vbrev8_v_u32m2_tumu(vbool16_t vm, vuint32m2_t vd, + vuint32m2_t vs2, size_t vl) { + return __riscv_vbrev8_tumu(vm, vd, vs2, vl); +} + +vuint32m4_t test_vbrev8_v_u32m4_tumu(vbool8_t vm, vuint32m4_t vd, + vuint32m4_t vs2, size_t vl) { + return __riscv_vbrev8_tumu(vm, vd, vs2, vl); +} + +vuint32m8_t test_vbrev8_v_u32m8_tumu(vbool4_t vm, vuint32m8_t vd, + vuint32m8_t vs2, size_t vl) { + return __riscv_vbrev8_tumu(vm, vd, vs2, vl); +} + +vuint64m1_t test_vbrev8_v_u64m1_tumu(vbool64_t vm, vuint64m1_t vd, + vuint64m1_t vs2, size_t vl) { + return __riscv_vbrev8_tumu(vm, vd, vs2, vl); +} + +vuint64m2_t test_vbrev8_v_u64m2_tumu(vbool32_t vm, vuint64m2_t vd, + vuint64m2_t vs2, size_t vl) { + return __riscv_vbrev8_tumu(vm, vd, vs2, vl); +} + +vuint64m4_t test_vbrev8_v_u64m4_tumu(vbool16_t vm, vuint64m4_t vd, + vuint64m4_t vs2, size_t vl) { + return __riscv_vbrev8_tumu(vm, vd, vs2, vl); +} + +vuint64m8_t test_vbrev8_v_u64m8_tumu(vbool8_t vm, vuint64m8_t vd, + vuint64m8_t vs2, size_t vl) { + return __riscv_vbrev8_tumu(vm, vd, vs2, vl); +} + +vuint8mf8_t test_vbrev8_v_u8mf8_mu(vbool64_t vm, vuint8mf8_t vd, + vuint8mf8_t vs2, size_t vl) { + return __riscv_vbrev8_mu(vm, vd, vs2, vl); +} + +vuint8mf4_t test_vbrev8_v_u8mf4_mu(vbool32_t vm, vuint8mf4_t vd, + vuint8mf4_t vs2, size_t vl) { + return __riscv_vbrev8_mu(vm, vd, vs2, vl); +} + +vuint8mf2_t test_vbrev8_v_u8mf2_mu(vbool16_t vm, vuint8mf2_t vd, + vuint8mf2_t vs2, size_t vl) { + return __riscv_vbrev8_mu(vm, vd, vs2, vl); +} + +vuint8m1_t test_vbrev8_v_u8m1_mu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, + size_t vl) { + return __riscv_vbrev8_mu(vm, vd, vs2, vl); +} + +vuint8m2_t test_vbrev8_v_u8m2_mu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, + size_t vl) { + return __riscv_vbrev8_mu(vm, vd, vs2, vl); +} + +vuint8m4_t test_vbrev8_v_u8m4_mu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, + size_t vl) { + return __riscv_vbrev8_mu(vm, vd, vs2, vl); +} + +vuint8m8_t test_vbrev8_v_u8m8_mu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, + size_t vl) { + return __riscv_vbrev8_mu(vm, vd, vs2, vl); +} + +vuint16mf4_t test_vbrev8_v_u16mf4_mu(vbool64_t vm, vuint16mf4_t vd, + vuint16mf4_t vs2, size_t vl) { + return __riscv_vbrev8_mu(vm, vd, vs2, vl); +} + +vuint16mf2_t test_vbrev8_v_u16mf2_mu(vbool32_t vm, vuint16mf2_t vd, + vuint16mf2_t vs2, size_t vl) { + return __riscv_vbrev8_mu(vm, vd, vs2, vl); +} + +vuint16m1_t test_vbrev8_v_u16m1_mu(vbool16_t vm, vuint16m1_t vd, + vuint16m1_t vs2, size_t vl) { + return __riscv_vbrev8_mu(vm, vd, vs2, vl); +} + +vuint16m2_t test_vbrev8_v_u16m2_mu(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, + size_t vl) { + return 
__riscv_vbrev8_mu(vm, vd, vs2, vl); +} + +vuint16m4_t test_vbrev8_v_u16m4_mu(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, + size_t vl) { + return __riscv_vbrev8_mu(vm, vd, vs2, vl); +} + +vuint16m8_t test_vbrev8_v_u16m8_mu(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, + size_t vl) { + return __riscv_vbrev8_mu(vm, vd, vs2, vl); +} + +vuint32mf2_t test_vbrev8_v_u32mf2_mu(vbool64_t vm, vuint32mf2_t vd, + vuint32mf2_t vs2, size_t vl) { + return __riscv_vbrev8_mu(vm, vd, vs2, vl); +} + +vuint32m1_t test_vbrev8_v_u32m1_mu(vbool32_t vm, vuint32m1_t vd, + vuint32m1_t vs2, size_t vl) { + return __riscv_vbrev8_mu(vm, vd, vs2, vl); +} + +vuint32m2_t test_vbrev8_v_u32m2_mu(vbool16_t vm, vuint32m2_t vd, + vuint32m2_t vs2, size_t vl) { + return __riscv_vbrev8_mu(vm, vd, vs2, vl); +} + +vuint32m4_t test_vbrev8_v_u32m4_mu(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, + size_t vl) { + return __riscv_vbrev8_mu(vm, vd, vs2, vl); +} + +vuint32m8_t test_vbrev8_v_u32m8_mu(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, + size_t vl) { + return __riscv_vbrev8_mu(vm, vd, vs2, vl); +} + +vuint64m1_t test_vbrev8_v_u64m1_mu(vbool64_t vm, vuint64m1_t vd, + vuint64m1_t vs2, size_t vl) { + return __riscv_vbrev8_mu(vm, vd, vs2, vl); +} + +vuint64m2_t test_vbrev8_v_u64m2_mu(vbool32_t vm, vuint64m2_t vd, + vuint64m2_t vs2, size_t vl) { + return __riscv_vbrev8_mu(vm, vd, vs2, vl); +} + +vuint64m4_t test_vbrev8_v_u64m4_mu(vbool16_t vm, vuint64m4_t vd, + vuint64m4_t vs2, size_t vl) { + return __riscv_vbrev8_mu(vm, vd, vs2, vl); +} + +vuint64m8_t test_vbrev8_v_u64m8_mu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, + size_t vl) { + return __riscv_vbrev8_mu(vm, vd, vs2, vl); +} diff --git a/auto-generated/vector-crypto/policy_funcs/llvm-overloaded-tests/vclmul.c b/auto-generated/vector-crypto/policy_funcs/llvm-overloaded-tests/vclmul.c new file mode 100644 index 000000000..11f24f0b9 --- /dev/null +++ b/auto-generated/vector-crypto/policy_funcs/llvm-overloaded-tests/vclmul.c @@ -0,0 +1,189 @@ +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zvl512b \ +// RUN: -target-feature +zvbb \ +// RUN: -target-feature +zvbc \ +// RUN: -target-feature +zvkg \ +// RUN: -target-feature +zvkned \ +// RUN: -target-feature +zvknhb \ +// RUN: -target-feature +zvksed \ +// RUN: -target-feature +zvksh -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include + +vuint64m1_t test_vclmul_vv_u64m1_tu(vuint64m1_t vd, vuint64m1_t vs2, + vuint64m1_t vs1, size_t vl) { + return __riscv_vclmul_tu(vd, vs2, vs1, vl); +} + +vuint64m1_t test_vclmul_vx_u64m1_tu(vuint64m1_t vd, vuint64m1_t vs2, + uint64_t rs1, size_t vl) { + return __riscv_vclmul_tu(vd, vs2, rs1, vl); +} + +vuint64m2_t test_vclmul_vv_u64m2_tu(vuint64m2_t vd, vuint64m2_t vs2, + vuint64m2_t vs1, size_t vl) { + return __riscv_vclmul_tu(vd, vs2, vs1, vl); +} + +vuint64m2_t test_vclmul_vx_u64m2_tu(vuint64m2_t vd, vuint64m2_t vs2, + uint64_t rs1, size_t vl) { + return __riscv_vclmul_tu(vd, vs2, rs1, vl); +} + +vuint64m4_t test_vclmul_vv_u64m4_tu(vuint64m4_t vd, vuint64m4_t vs2, + vuint64m4_t vs1, size_t vl) { + return __riscv_vclmul_tu(vd, vs2, vs1, vl); +} + +vuint64m4_t test_vclmul_vx_u64m4_tu(vuint64m4_t vd, vuint64m4_t vs2, + uint64_t rs1, size_t vl) { + return __riscv_vclmul_tu(vd, vs2, rs1, vl); +} + +vuint64m8_t test_vclmul_vv_u64m8_tu(vuint64m8_t vd, vuint64m8_t vs2, + vuint64m8_t vs1, size_t vl) { + return __riscv_vclmul_tu(vd, vs2, vs1, vl); +} + 
+vuint64m8_t test_vclmul_vx_u64m8_tu(vuint64m8_t vd, vuint64m8_t vs2, + uint64_t rs1, size_t vl) { + return __riscv_vclmul_tu(vd, vs2, rs1, vl); +} + +vuint64m1_t test_vclmul_vv_u64m1_tum(vbool64_t vm, vuint64m1_t vd, + vuint64m1_t vs2, vuint64m1_t vs1, + size_t vl) { + return __riscv_vclmul_tum(vm, vd, vs2, vs1, vl); +} + +vuint64m1_t test_vclmul_vx_u64m1_tum(vbool64_t vm, vuint64m1_t vd, + vuint64m1_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vclmul_tum(vm, vd, vs2, rs1, vl); +} + +vuint64m2_t test_vclmul_vv_u64m2_tum(vbool32_t vm, vuint64m2_t vd, + vuint64m2_t vs2, vuint64m2_t vs1, + size_t vl) { + return __riscv_vclmul_tum(vm, vd, vs2, vs1, vl); +} + +vuint64m2_t test_vclmul_vx_u64m2_tum(vbool32_t vm, vuint64m2_t vd, + vuint64m2_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vclmul_tum(vm, vd, vs2, rs1, vl); +} + +vuint64m4_t test_vclmul_vv_u64m4_tum(vbool16_t vm, vuint64m4_t vd, + vuint64m4_t vs2, vuint64m4_t vs1, + size_t vl) { + return __riscv_vclmul_tum(vm, vd, vs2, vs1, vl); +} + +vuint64m4_t test_vclmul_vx_u64m4_tum(vbool16_t vm, vuint64m4_t vd, + vuint64m4_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vclmul_tum(vm, vd, vs2, rs1, vl); +} + +vuint64m8_t test_vclmul_vv_u64m8_tum(vbool8_t vm, vuint64m8_t vd, + vuint64m8_t vs2, vuint64m8_t vs1, + size_t vl) { + return __riscv_vclmul_tum(vm, vd, vs2, vs1, vl); +} + +vuint64m8_t test_vclmul_vx_u64m8_tum(vbool8_t vm, vuint64m8_t vd, + vuint64m8_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vclmul_tum(vm, vd, vs2, rs1, vl); +} + +vuint64m1_t test_vclmul_vv_u64m1_tumu(vbool64_t vm, vuint64m1_t vd, + vuint64m1_t vs2, vuint64m1_t vs1, + size_t vl) { + return __riscv_vclmul_tumu(vm, vd, vs2, vs1, vl); +} + +vuint64m1_t test_vclmul_vx_u64m1_tumu(vbool64_t vm, vuint64m1_t vd, + vuint64m1_t vs2, uint64_t rs1, + size_t vl) { + return __riscv_vclmul_tumu(vm, vd, vs2, rs1, vl); +} + +vuint64m2_t test_vclmul_vv_u64m2_tumu(vbool32_t vm, vuint64m2_t vd, + vuint64m2_t vs2, vuint64m2_t vs1, + size_t vl) { + return __riscv_vclmul_tumu(vm, vd, vs2, vs1, vl); +} + +vuint64m2_t test_vclmul_vx_u64m2_tumu(vbool32_t vm, vuint64m2_t vd, + vuint64m2_t vs2, uint64_t rs1, + size_t vl) { + return __riscv_vclmul_tumu(vm, vd, vs2, rs1, vl); +} + +vuint64m4_t test_vclmul_vv_u64m4_tumu(vbool16_t vm, vuint64m4_t vd, + vuint64m4_t vs2, vuint64m4_t vs1, + size_t vl) { + return __riscv_vclmul_tumu(vm, vd, vs2, vs1, vl); +} + +vuint64m4_t test_vclmul_vx_u64m4_tumu(vbool16_t vm, vuint64m4_t vd, + vuint64m4_t vs2, uint64_t rs1, + size_t vl) { + return __riscv_vclmul_tumu(vm, vd, vs2, rs1, vl); +} + +vuint64m8_t test_vclmul_vv_u64m8_tumu(vbool8_t vm, vuint64m8_t vd, + vuint64m8_t vs2, vuint64m8_t vs1, + size_t vl) { + return __riscv_vclmul_tumu(vm, vd, vs2, vs1, vl); +} + +vuint64m8_t test_vclmul_vx_u64m8_tumu(vbool8_t vm, vuint64m8_t vd, + vuint64m8_t vs2, uint64_t rs1, + size_t vl) { + return __riscv_vclmul_tumu(vm, vd, vs2, rs1, vl); +} + +vuint64m1_t test_vclmul_vv_u64m1_mu(vbool64_t vm, vuint64m1_t vd, + vuint64m1_t vs2, vuint64m1_t vs1, + size_t vl) { + return __riscv_vclmul_mu(vm, vd, vs2, vs1, vl); +} + +vuint64m1_t test_vclmul_vx_u64m1_mu(vbool64_t vm, vuint64m1_t vd, + vuint64m1_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vclmul_mu(vm, vd, vs2, rs1, vl); +} + +vuint64m2_t test_vclmul_vv_u64m2_mu(vbool32_t vm, vuint64m2_t vd, + vuint64m2_t vs2, vuint64m2_t vs1, + size_t vl) { + return __riscv_vclmul_mu(vm, vd, vs2, vs1, vl); +} + +vuint64m2_t test_vclmul_vx_u64m2_mu(vbool32_t vm, vuint64m2_t vd, + vuint64m2_t vs2, uint64_t rs1, size_t vl) 
{ + return __riscv_vclmul_mu(vm, vd, vs2, rs1, vl); +} + +vuint64m4_t test_vclmul_vv_u64m4_mu(vbool16_t vm, vuint64m4_t vd, + vuint64m4_t vs2, vuint64m4_t vs1, + size_t vl) { + return __riscv_vclmul_mu(vm, vd, vs2, vs1, vl); +} + +vuint64m4_t test_vclmul_vx_u64m4_mu(vbool16_t vm, vuint64m4_t vd, + vuint64m4_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vclmul_mu(vm, vd, vs2, rs1, vl); +} + +vuint64m8_t test_vclmul_vv_u64m8_mu(vbool8_t vm, vuint64m8_t vd, + vuint64m8_t vs2, vuint64m8_t vs1, + size_t vl) { + return __riscv_vclmul_mu(vm, vd, vs2, vs1, vl); +} + +vuint64m8_t test_vclmul_vx_u64m8_mu(vbool8_t vm, vuint64m8_t vd, + vuint64m8_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vclmul_mu(vm, vd, vs2, rs1, vl); +} diff --git a/auto-generated/vector-crypto/policy_funcs/llvm-overloaded-tests/vclmulh.c b/auto-generated/vector-crypto/policy_funcs/llvm-overloaded-tests/vclmulh.c new file mode 100644 index 000000000..f9a4a8af7 --- /dev/null +++ b/auto-generated/vector-crypto/policy_funcs/llvm-overloaded-tests/vclmulh.c @@ -0,0 +1,193 @@ +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zvl512b \ +// RUN: -target-feature +zvbb \ +// RUN: -target-feature +zvbc \ +// RUN: -target-feature +zvkg \ +// RUN: -target-feature +zvkned \ +// RUN: -target-feature +zvknhb \ +// RUN: -target-feature +zvksed \ +// RUN: -target-feature +zvksh -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include + +vuint64m1_t test_vclmulh_vv_u64m1_tu(vuint64m1_t vd, vuint64m1_t vs2, + vuint64m1_t vs1, size_t vl) { + return __riscv_vclmulh_tu(vd, vs2, vs1, vl); +} + +vuint64m1_t test_vclmulh_vx_u64m1_tu(vuint64m1_t vd, vuint64m1_t vs2, + uint64_t rs1, size_t vl) { + return __riscv_vclmulh_tu(vd, vs2, rs1, vl); +} + +vuint64m2_t test_vclmulh_vv_u64m2_tu(vuint64m2_t vd, vuint64m2_t vs2, + vuint64m2_t vs1, size_t vl) { + return __riscv_vclmulh_tu(vd, vs2, vs1, vl); +} + +vuint64m2_t test_vclmulh_vx_u64m2_tu(vuint64m2_t vd, vuint64m2_t vs2, + uint64_t rs1, size_t vl) { + return __riscv_vclmulh_tu(vd, vs2, rs1, vl); +} + +vuint64m4_t test_vclmulh_vv_u64m4_tu(vuint64m4_t vd, vuint64m4_t vs2, + vuint64m4_t vs1, size_t vl) { + return __riscv_vclmulh_tu(vd, vs2, vs1, vl); +} + +vuint64m4_t test_vclmulh_vx_u64m4_tu(vuint64m4_t vd, vuint64m4_t vs2, + uint64_t rs1, size_t vl) { + return __riscv_vclmulh_tu(vd, vs2, rs1, vl); +} + +vuint64m8_t test_vclmulh_vv_u64m8_tu(vuint64m8_t vd, vuint64m8_t vs2, + vuint64m8_t vs1, size_t vl) { + return __riscv_vclmulh_tu(vd, vs2, vs1, vl); +} + +vuint64m8_t test_vclmulh_vx_u64m8_tu(vuint64m8_t vd, vuint64m8_t vs2, + uint64_t rs1, size_t vl) { + return __riscv_vclmulh_tu(vd, vs2, rs1, vl); +} + +vuint64m1_t test_vclmulh_vv_u64m1_tum(vbool64_t vm, vuint64m1_t vd, + vuint64m1_t vs2, vuint64m1_t vs1, + size_t vl) { + return __riscv_vclmulh_tum(vm, vd, vs2, vs1, vl); +} + +vuint64m1_t test_vclmulh_vx_u64m1_tum(vbool64_t vm, vuint64m1_t vd, + vuint64m1_t vs2, uint64_t rs1, + size_t vl) { + return __riscv_vclmulh_tum(vm, vd, vs2, rs1, vl); +} + +vuint64m2_t test_vclmulh_vv_u64m2_tum(vbool32_t vm, vuint64m2_t vd, + vuint64m2_t vs2, vuint64m2_t vs1, + size_t vl) { + return __riscv_vclmulh_tum(vm, vd, vs2, vs1, vl); +} + +vuint64m2_t test_vclmulh_vx_u64m2_tum(vbool32_t vm, vuint64m2_t vd, + vuint64m2_t vs2, uint64_t rs1, + size_t vl) { + return __riscv_vclmulh_tum(vm, vd, vs2, rs1, vl); +} + +vuint64m4_t test_vclmulh_vv_u64m4_tum(vbool16_t vm, vuint64m4_t vd, 
+ vuint64m4_t vs2, vuint64m4_t vs1, + size_t vl) { + return __riscv_vclmulh_tum(vm, vd, vs2, vs1, vl); +} + +vuint64m4_t test_vclmulh_vx_u64m4_tum(vbool16_t vm, vuint64m4_t vd, + vuint64m4_t vs2, uint64_t rs1, + size_t vl) { + return __riscv_vclmulh_tum(vm, vd, vs2, rs1, vl); +} + +vuint64m8_t test_vclmulh_vv_u64m8_tum(vbool8_t vm, vuint64m8_t vd, + vuint64m8_t vs2, vuint64m8_t vs1, + size_t vl) { + return __riscv_vclmulh_tum(vm, vd, vs2, vs1, vl); +} + +vuint64m8_t test_vclmulh_vx_u64m8_tum(vbool8_t vm, vuint64m8_t vd, + vuint64m8_t vs2, uint64_t rs1, + size_t vl) { + return __riscv_vclmulh_tum(vm, vd, vs2, rs1, vl); +} + +vuint64m1_t test_vclmulh_vv_u64m1_tumu(vbool64_t vm, vuint64m1_t vd, + vuint64m1_t vs2, vuint64m1_t vs1, + size_t vl) { + return __riscv_vclmulh_tumu(vm, vd, vs2, vs1, vl); +} + +vuint64m1_t test_vclmulh_vx_u64m1_tumu(vbool64_t vm, vuint64m1_t vd, + vuint64m1_t vs2, uint64_t rs1, + size_t vl) { + return __riscv_vclmulh_tumu(vm, vd, vs2, rs1, vl); +} + +vuint64m2_t test_vclmulh_vv_u64m2_tumu(vbool32_t vm, vuint64m2_t vd, + vuint64m2_t vs2, vuint64m2_t vs1, + size_t vl) { + return __riscv_vclmulh_tumu(vm, vd, vs2, vs1, vl); +} + +vuint64m2_t test_vclmulh_vx_u64m2_tumu(vbool32_t vm, vuint64m2_t vd, + vuint64m2_t vs2, uint64_t rs1, + size_t vl) { + return __riscv_vclmulh_tumu(vm, vd, vs2, rs1, vl); +} + +vuint64m4_t test_vclmulh_vv_u64m4_tumu(vbool16_t vm, vuint64m4_t vd, + vuint64m4_t vs2, vuint64m4_t vs1, + size_t vl) { + return __riscv_vclmulh_tumu(vm, vd, vs2, vs1, vl); +} + +vuint64m4_t test_vclmulh_vx_u64m4_tumu(vbool16_t vm, vuint64m4_t vd, + vuint64m4_t vs2, uint64_t rs1, + size_t vl) { + return __riscv_vclmulh_tumu(vm, vd, vs2, rs1, vl); +} + +vuint64m8_t test_vclmulh_vv_u64m8_tumu(vbool8_t vm, vuint64m8_t vd, + vuint64m8_t vs2, vuint64m8_t vs1, + size_t vl) { + return __riscv_vclmulh_tumu(vm, vd, vs2, vs1, vl); +} + +vuint64m8_t test_vclmulh_vx_u64m8_tumu(vbool8_t vm, vuint64m8_t vd, + vuint64m8_t vs2, uint64_t rs1, + size_t vl) { + return __riscv_vclmulh_tumu(vm, vd, vs2, rs1, vl); +} + +vuint64m1_t test_vclmulh_vv_u64m1_mu(vbool64_t vm, vuint64m1_t vd, + vuint64m1_t vs2, vuint64m1_t vs1, + size_t vl) { + return __riscv_vclmulh_mu(vm, vd, vs2, vs1, vl); +} + +vuint64m1_t test_vclmulh_vx_u64m1_mu(vbool64_t vm, vuint64m1_t vd, + vuint64m1_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vclmulh_mu(vm, vd, vs2, rs1, vl); +} + +vuint64m2_t test_vclmulh_vv_u64m2_mu(vbool32_t vm, vuint64m2_t vd, + vuint64m2_t vs2, vuint64m2_t vs1, + size_t vl) { + return __riscv_vclmulh_mu(vm, vd, vs2, vs1, vl); +} + +vuint64m2_t test_vclmulh_vx_u64m2_mu(vbool32_t vm, vuint64m2_t vd, + vuint64m2_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vclmulh_mu(vm, vd, vs2, rs1, vl); +} + +vuint64m4_t test_vclmulh_vv_u64m4_mu(vbool16_t vm, vuint64m4_t vd, + vuint64m4_t vs2, vuint64m4_t vs1, + size_t vl) { + return __riscv_vclmulh_mu(vm, vd, vs2, vs1, vl); +} + +vuint64m4_t test_vclmulh_vx_u64m4_mu(vbool16_t vm, vuint64m4_t vd, + vuint64m4_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vclmulh_mu(vm, vd, vs2, rs1, vl); +} + +vuint64m8_t test_vclmulh_vv_u64m8_mu(vbool8_t vm, vuint64m8_t vd, + vuint64m8_t vs2, vuint64m8_t vs1, + size_t vl) { + return __riscv_vclmulh_mu(vm, vd, vs2, vs1, vl); +} + +vuint64m8_t test_vclmulh_vx_u64m8_mu(vbool8_t vm, vuint64m8_t vd, + vuint64m8_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vclmulh_mu(vm, vd, vs2, rs1, vl); +} diff --git a/auto-generated/vector-crypto/policy_funcs/llvm-overloaded-tests/vclz.c 
b/auto-generated/vector-crypto/policy_funcs/llvm-overloaded-tests/vclz.c new file mode 100644 index 000000000..e93b008a3 --- /dev/null +++ b/auto-generated/vector-crypto/policy_funcs/llvm-overloaded-tests/vclz.c @@ -0,0 +1,434 @@ +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zvl512b \ +// RUN: -target-feature +zvbb \ +// RUN: -target-feature +zvbc \ +// RUN: -target-feature +zvkg \ +// RUN: -target-feature +zvkned \ +// RUN: -target-feature +zvknhb \ +// RUN: -target-feature +zvksed \ +// RUN: -target-feature +zvksh -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include + +vuint8mf8_t test_vclz_v_u8mf8_tu(vuint8mf8_t vd, vuint8mf8_t vs2, size_t vl) { + return __riscv_vclz_tu(vd, vs2, vl); +} + +vuint8mf4_t test_vclz_v_u8mf4_tu(vuint8mf4_t vd, vuint8mf4_t vs2, size_t vl) { + return __riscv_vclz_tu(vd, vs2, vl); +} + +vuint8mf2_t test_vclz_v_u8mf2_tu(vuint8mf2_t vd, vuint8mf2_t vs2, size_t vl) { + return __riscv_vclz_tu(vd, vs2, vl); +} + +vuint8m1_t test_vclz_v_u8m1_tu(vuint8m1_t vd, vuint8m1_t vs2, size_t vl) { + return __riscv_vclz_tu(vd, vs2, vl); +} + +vuint8m2_t test_vclz_v_u8m2_tu(vuint8m2_t vd, vuint8m2_t vs2, size_t vl) { + return __riscv_vclz_tu(vd, vs2, vl); +} + +vuint8m4_t test_vclz_v_u8m4_tu(vuint8m4_t vd, vuint8m4_t vs2, size_t vl) { + return __riscv_vclz_tu(vd, vs2, vl); +} + +vuint8m8_t test_vclz_v_u8m8_tu(vuint8m8_t vd, vuint8m8_t vs2, size_t vl) { + return __riscv_vclz_tu(vd, vs2, vl); +} + +vuint16mf4_t test_vclz_v_u16mf4_tu(vuint16mf4_t vd, vuint16mf4_t vs2, + size_t vl) { + return __riscv_vclz_tu(vd, vs2, vl); +} + +vuint16mf2_t test_vclz_v_u16mf2_tu(vuint16mf2_t vd, vuint16mf2_t vs2, + size_t vl) { + return __riscv_vclz_tu(vd, vs2, vl); +} + +vuint16m1_t test_vclz_v_u16m1_tu(vuint16m1_t vd, vuint16m1_t vs2, size_t vl) { + return __riscv_vclz_tu(vd, vs2, vl); +} + +vuint16m2_t test_vclz_v_u16m2_tu(vuint16m2_t vd, vuint16m2_t vs2, size_t vl) { + return __riscv_vclz_tu(vd, vs2, vl); +} + +vuint16m4_t test_vclz_v_u16m4_tu(vuint16m4_t vd, vuint16m4_t vs2, size_t vl) { + return __riscv_vclz_tu(vd, vs2, vl); +} + +vuint16m8_t test_vclz_v_u16m8_tu(vuint16m8_t vd, vuint16m8_t vs2, size_t vl) { + return __riscv_vclz_tu(vd, vs2, vl); +} + +vuint32mf2_t test_vclz_v_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, + size_t vl) { + return __riscv_vclz_tu(vd, vs2, vl); +} + +vuint32m1_t test_vclz_v_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vclz_tu(vd, vs2, vl); +} + +vuint32m2_t test_vclz_v_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vclz_tu(vd, vs2, vl); +} + +vuint32m4_t test_vclz_v_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vclz_tu(vd, vs2, vl); +} + +vuint32m8_t test_vclz_v_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { + return __riscv_vclz_tu(vd, vs2, vl); +} + +vuint64m1_t test_vclz_v_u64m1_tu(vuint64m1_t vd, vuint64m1_t vs2, size_t vl) { + return __riscv_vclz_tu(vd, vs2, vl); +} + +vuint64m2_t test_vclz_v_u64m2_tu(vuint64m2_t vd, vuint64m2_t vs2, size_t vl) { + return __riscv_vclz_tu(vd, vs2, vl); +} + +vuint64m4_t test_vclz_v_u64m4_tu(vuint64m4_t vd, vuint64m4_t vs2, size_t vl) { + return __riscv_vclz_tu(vd, vs2, vl); +} + +vuint64m8_t test_vclz_v_u64m8_tu(vuint64m8_t vd, vuint64m8_t vs2, size_t vl) { + return __riscv_vclz_tu(vd, vs2, vl); +} + +vuint8mf8_t test_vclz_v_u8mf8_tum(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, + size_t vl) { 
+ return __riscv_vclz_tum(vm, vd, vs2, vl); +} + +vuint8mf4_t test_vclz_v_u8mf4_tum(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, + size_t vl) { + return __riscv_vclz_tum(vm, vd, vs2, vl); +} + +vuint8mf2_t test_vclz_v_u8mf2_tum(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, + size_t vl) { + return __riscv_vclz_tum(vm, vd, vs2, vl); +} + +vuint8m1_t test_vclz_v_u8m1_tum(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, + size_t vl) { + return __riscv_vclz_tum(vm, vd, vs2, vl); +} + +vuint8m2_t test_vclz_v_u8m2_tum(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, + size_t vl) { + return __riscv_vclz_tum(vm, vd, vs2, vl); +} + +vuint8m4_t test_vclz_v_u8m4_tum(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, + size_t vl) { + return __riscv_vclz_tum(vm, vd, vs2, vl); +} + +vuint8m8_t test_vclz_v_u8m8_tum(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, + size_t vl) { + return __riscv_vclz_tum(vm, vd, vs2, vl); +} + +vuint16mf4_t test_vclz_v_u16mf4_tum(vbool64_t vm, vuint16mf4_t vd, + vuint16mf4_t vs2, size_t vl) { + return __riscv_vclz_tum(vm, vd, vs2, vl); +} + +vuint16mf2_t test_vclz_v_u16mf2_tum(vbool32_t vm, vuint16mf2_t vd, + vuint16mf2_t vs2, size_t vl) { + return __riscv_vclz_tum(vm, vd, vs2, vl); +} + +vuint16m1_t test_vclz_v_u16m1_tum(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, + size_t vl) { + return __riscv_vclz_tum(vm, vd, vs2, vl); +} + +vuint16m2_t test_vclz_v_u16m2_tum(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, + size_t vl) { + return __riscv_vclz_tum(vm, vd, vs2, vl); +} + +vuint16m4_t test_vclz_v_u16m4_tum(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, + size_t vl) { + return __riscv_vclz_tum(vm, vd, vs2, vl); +} + +vuint16m8_t test_vclz_v_u16m8_tum(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, + size_t vl) { + return __riscv_vclz_tum(vm, vd, vs2, vl); +} + +vuint32mf2_t test_vclz_v_u32mf2_tum(vbool64_t vm, vuint32mf2_t vd, + vuint32mf2_t vs2, size_t vl) { + return __riscv_vclz_tum(vm, vd, vs2, vl); +} + +vuint32m1_t test_vclz_v_u32m1_tum(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, + size_t vl) { + return __riscv_vclz_tum(vm, vd, vs2, vl); +} + +vuint32m2_t test_vclz_v_u32m2_tum(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, + size_t vl) { + return __riscv_vclz_tum(vm, vd, vs2, vl); +} + +vuint32m4_t test_vclz_v_u32m4_tum(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, + size_t vl) { + return __riscv_vclz_tum(vm, vd, vs2, vl); +} + +vuint32m8_t test_vclz_v_u32m8_tum(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, + size_t vl) { + return __riscv_vclz_tum(vm, vd, vs2, vl); +} + +vuint64m1_t test_vclz_v_u64m1_tum(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, + size_t vl) { + return __riscv_vclz_tum(vm, vd, vs2, vl); +} + +vuint64m2_t test_vclz_v_u64m2_tum(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, + size_t vl) { + return __riscv_vclz_tum(vm, vd, vs2, vl); +} + +vuint64m4_t test_vclz_v_u64m4_tum(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, + size_t vl) { + return __riscv_vclz_tum(vm, vd, vs2, vl); +} + +vuint64m8_t test_vclz_v_u64m8_tum(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, + size_t vl) { + return __riscv_vclz_tum(vm, vd, vs2, vl); +} + +vuint8mf8_t test_vclz_v_u8mf8_tumu(vbool64_t vm, vuint8mf8_t vd, + vuint8mf8_t vs2, size_t vl) { + return __riscv_vclz_tumu(vm, vd, vs2, vl); +} + +vuint8mf4_t test_vclz_v_u8mf4_tumu(vbool32_t vm, vuint8mf4_t vd, + vuint8mf4_t vs2, size_t vl) { + return __riscv_vclz_tumu(vm, vd, vs2, vl); +} + +vuint8mf2_t test_vclz_v_u8mf2_tumu(vbool16_t vm, vuint8mf2_t vd, + vuint8mf2_t vs2, size_t vl) { + return __riscv_vclz_tumu(vm, vd, vs2, vl); +} + 
+vuint8m1_t test_vclz_v_u8m1_tumu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, + size_t vl) { + return __riscv_vclz_tumu(vm, vd, vs2, vl); +} + +vuint8m2_t test_vclz_v_u8m2_tumu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, + size_t vl) { + return __riscv_vclz_tumu(vm, vd, vs2, vl); +} + +vuint8m4_t test_vclz_v_u8m4_tumu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, + size_t vl) { + return __riscv_vclz_tumu(vm, vd, vs2, vl); +} + +vuint8m8_t test_vclz_v_u8m8_tumu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, + size_t vl) { + return __riscv_vclz_tumu(vm, vd, vs2, vl); +} + +vuint16mf4_t test_vclz_v_u16mf4_tumu(vbool64_t vm, vuint16mf4_t vd, + vuint16mf4_t vs2, size_t vl) { + return __riscv_vclz_tumu(vm, vd, vs2, vl); +} + +vuint16mf2_t test_vclz_v_u16mf2_tumu(vbool32_t vm, vuint16mf2_t vd, + vuint16mf2_t vs2, size_t vl) { + return __riscv_vclz_tumu(vm, vd, vs2, vl); +} + +vuint16m1_t test_vclz_v_u16m1_tumu(vbool16_t vm, vuint16m1_t vd, + vuint16m1_t vs2, size_t vl) { + return __riscv_vclz_tumu(vm, vd, vs2, vl); +} + +vuint16m2_t test_vclz_v_u16m2_tumu(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, + size_t vl) { + return __riscv_vclz_tumu(vm, vd, vs2, vl); +} + +vuint16m4_t test_vclz_v_u16m4_tumu(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, + size_t vl) { + return __riscv_vclz_tumu(vm, vd, vs2, vl); +} + +vuint16m8_t test_vclz_v_u16m8_tumu(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, + size_t vl) { + return __riscv_vclz_tumu(vm, vd, vs2, vl); +} + +vuint32mf2_t test_vclz_v_u32mf2_tumu(vbool64_t vm, vuint32mf2_t vd, + vuint32mf2_t vs2, size_t vl) { + return __riscv_vclz_tumu(vm, vd, vs2, vl); +} + +vuint32m1_t test_vclz_v_u32m1_tumu(vbool32_t vm, vuint32m1_t vd, + vuint32m1_t vs2, size_t vl) { + return __riscv_vclz_tumu(vm, vd, vs2, vl); +} + +vuint32m2_t test_vclz_v_u32m2_tumu(vbool16_t vm, vuint32m2_t vd, + vuint32m2_t vs2, size_t vl) { + return __riscv_vclz_tumu(vm, vd, vs2, vl); +} + +vuint32m4_t test_vclz_v_u32m4_tumu(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, + size_t vl) { + return __riscv_vclz_tumu(vm, vd, vs2, vl); +} + +vuint32m8_t test_vclz_v_u32m8_tumu(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, + size_t vl) { + return __riscv_vclz_tumu(vm, vd, vs2, vl); +} + +vuint64m1_t test_vclz_v_u64m1_tumu(vbool64_t vm, vuint64m1_t vd, + vuint64m1_t vs2, size_t vl) { + return __riscv_vclz_tumu(vm, vd, vs2, vl); +} + +vuint64m2_t test_vclz_v_u64m2_tumu(vbool32_t vm, vuint64m2_t vd, + vuint64m2_t vs2, size_t vl) { + return __riscv_vclz_tumu(vm, vd, vs2, vl); +} + +vuint64m4_t test_vclz_v_u64m4_tumu(vbool16_t vm, vuint64m4_t vd, + vuint64m4_t vs2, size_t vl) { + return __riscv_vclz_tumu(vm, vd, vs2, vl); +} + +vuint64m8_t test_vclz_v_u64m8_tumu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, + size_t vl) { + return __riscv_vclz_tumu(vm, vd, vs2, vl); +} + +vuint8mf8_t test_vclz_v_u8mf8_mu(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, + size_t vl) { + return __riscv_vclz_mu(vm, vd, vs2, vl); +} + +vuint8mf4_t test_vclz_v_u8mf4_mu(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, + size_t vl) { + return __riscv_vclz_mu(vm, vd, vs2, vl); +} + +vuint8mf2_t test_vclz_v_u8mf2_mu(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, + size_t vl) { + return __riscv_vclz_mu(vm, vd, vs2, vl); +} + +vuint8m1_t test_vclz_v_u8m1_mu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, + size_t vl) { + return __riscv_vclz_mu(vm, vd, vs2, vl); +} + +vuint8m2_t test_vclz_v_u8m2_mu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, + size_t vl) { + return __riscv_vclz_mu(vm, vd, vs2, vl); +} + +vuint8m4_t test_vclz_v_u8m4_mu(vbool2_t vm, 
vuint8m4_t vd, vuint8m4_t vs2, + size_t vl) { + return __riscv_vclz_mu(vm, vd, vs2, vl); +} + +vuint8m8_t test_vclz_v_u8m8_mu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, + size_t vl) { + return __riscv_vclz_mu(vm, vd, vs2, vl); +} + +vuint16mf4_t test_vclz_v_u16mf4_mu(vbool64_t vm, vuint16mf4_t vd, + vuint16mf4_t vs2, size_t vl) { + return __riscv_vclz_mu(vm, vd, vs2, vl); +} + +vuint16mf2_t test_vclz_v_u16mf2_mu(vbool32_t vm, vuint16mf2_t vd, + vuint16mf2_t vs2, size_t vl) { + return __riscv_vclz_mu(vm, vd, vs2, vl); +} + +vuint16m1_t test_vclz_v_u16m1_mu(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, + size_t vl) { + return __riscv_vclz_mu(vm, vd, vs2, vl); +} + +vuint16m2_t test_vclz_v_u16m2_mu(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, + size_t vl) { + return __riscv_vclz_mu(vm, vd, vs2, vl); +} + +vuint16m4_t test_vclz_v_u16m4_mu(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, + size_t vl) { + return __riscv_vclz_mu(vm, vd, vs2, vl); +} + +vuint16m8_t test_vclz_v_u16m8_mu(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, + size_t vl) { + return __riscv_vclz_mu(vm, vd, vs2, vl); +} + +vuint32mf2_t test_vclz_v_u32mf2_mu(vbool64_t vm, vuint32mf2_t vd, + vuint32mf2_t vs2, size_t vl) { + return __riscv_vclz_mu(vm, vd, vs2, vl); +} + +vuint32m1_t test_vclz_v_u32m1_mu(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, + size_t vl) { + return __riscv_vclz_mu(vm, vd, vs2, vl); +} + +vuint32m2_t test_vclz_v_u32m2_mu(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, + size_t vl) { + return __riscv_vclz_mu(vm, vd, vs2, vl); +} + +vuint32m4_t test_vclz_v_u32m4_mu(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, + size_t vl) { + return __riscv_vclz_mu(vm, vd, vs2, vl); +} + +vuint32m8_t test_vclz_v_u32m8_mu(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, + size_t vl) { + return __riscv_vclz_mu(vm, vd, vs2, vl); +} + +vuint64m1_t test_vclz_v_u64m1_mu(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, + size_t vl) { + return __riscv_vclz_mu(vm, vd, vs2, vl); +} + +vuint64m2_t test_vclz_v_u64m2_mu(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, + size_t vl) { + return __riscv_vclz_mu(vm, vd, vs2, vl); +} + +vuint64m4_t test_vclz_v_u64m4_mu(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, + size_t vl) { + return __riscv_vclz_mu(vm, vd, vs2, vl); +} + +vuint64m8_t test_vclz_v_u64m8_mu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, + size_t vl) { + return __riscv_vclz_mu(vm, vd, vs2, vl); +} diff --git a/auto-generated/vector-crypto/policy_funcs/llvm-overloaded-tests/vcpop.c b/auto-generated/vector-crypto/policy_funcs/llvm-overloaded-tests/vcpop.c new file mode 100644 index 000000000..4eb8efa2b --- /dev/null +++ b/auto-generated/vector-crypto/policy_funcs/llvm-overloaded-tests/vcpop.c @@ -0,0 +1,427 @@ +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include + +vuint8mf8_t test_vcpop_v_u8mf8_tu(vuint8mf8_t vd, vuint8mf8_t vs2, size_t vl) { + return __riscv_vcpop_tu(vd, vs2, vl); +} + +vuint8mf4_t test_vcpop_v_u8mf4_tu(vuint8mf4_t vd, vuint8mf4_t vs2, size_t vl) { + return __riscv_vcpop_tu(vd, vs2, vl); +} + +vuint8mf2_t test_vcpop_v_u8mf2_tu(vuint8mf2_t vd, vuint8mf2_t vs2, size_t vl) { + return __riscv_vcpop_tu(vd, vs2, vl); +} + +vuint8m1_t test_vcpop_v_u8m1_tu(vuint8m1_t vd, vuint8m1_t vs2, size_t vl) { + return __riscv_vcpop_tu(vd, vs2, vl); +} + +vuint8m2_t test_vcpop_v_u8m2_tu(vuint8m2_t vd, vuint8m2_t vs2, size_t vl) { + return __riscv_vcpop_tu(vd, 
vs2, vl); +} + +vuint8m4_t test_vcpop_v_u8m4_tu(vuint8m4_t vd, vuint8m4_t vs2, size_t vl) { + return __riscv_vcpop_tu(vd, vs2, vl); +} + +vuint8m8_t test_vcpop_v_u8m8_tu(vuint8m8_t vd, vuint8m8_t vs2, size_t vl) { + return __riscv_vcpop_tu(vd, vs2, vl); +} + +vuint16mf4_t test_vcpop_v_u16mf4_tu(vuint16mf4_t vd, vuint16mf4_t vs2, + size_t vl) { + return __riscv_vcpop_tu(vd, vs2, vl); +} + +vuint16mf2_t test_vcpop_v_u16mf2_tu(vuint16mf2_t vd, vuint16mf2_t vs2, + size_t vl) { + return __riscv_vcpop_tu(vd, vs2, vl); +} + +vuint16m1_t test_vcpop_v_u16m1_tu(vuint16m1_t vd, vuint16m1_t vs2, size_t vl) { + return __riscv_vcpop_tu(vd, vs2, vl); +} + +vuint16m2_t test_vcpop_v_u16m2_tu(vuint16m2_t vd, vuint16m2_t vs2, size_t vl) { + return __riscv_vcpop_tu(vd, vs2, vl); +} + +vuint16m4_t test_vcpop_v_u16m4_tu(vuint16m4_t vd, vuint16m4_t vs2, size_t vl) { + return __riscv_vcpop_tu(vd, vs2, vl); +} + +vuint16m8_t test_vcpop_v_u16m8_tu(vuint16m8_t vd, vuint16m8_t vs2, size_t vl) { + return __riscv_vcpop_tu(vd, vs2, vl); +} + +vuint32mf2_t test_vcpop_v_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, + size_t vl) { + return __riscv_vcpop_tu(vd, vs2, vl); +} + +vuint32m1_t test_vcpop_v_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vcpop_tu(vd, vs2, vl); +} + +vuint32m2_t test_vcpop_v_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vcpop_tu(vd, vs2, vl); +} + +vuint32m4_t test_vcpop_v_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vcpop_tu(vd, vs2, vl); +} + +vuint32m8_t test_vcpop_v_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { + return __riscv_vcpop_tu(vd, vs2, vl); +} + +vuint64m1_t test_vcpop_v_u64m1_tu(vuint64m1_t vd, vuint64m1_t vs2, size_t vl) { + return __riscv_vcpop_tu(vd, vs2, vl); +} + +vuint64m2_t test_vcpop_v_u64m2_tu(vuint64m2_t vd, vuint64m2_t vs2, size_t vl) { + return __riscv_vcpop_tu(vd, vs2, vl); +} + +vuint64m4_t test_vcpop_v_u64m4_tu(vuint64m4_t vd, vuint64m4_t vs2, size_t vl) { + return __riscv_vcpop_tu(vd, vs2, vl); +} + +vuint64m8_t test_vcpop_v_u64m8_tu(vuint64m8_t vd, vuint64m8_t vs2, size_t vl) { + return __riscv_vcpop_tu(vd, vs2, vl); +} + +vuint8mf8_t test_vcpop_v_u8mf8_tum(vbool64_t vm, vuint8mf8_t vd, + vuint8mf8_t vs2, size_t vl) { + return __riscv_vcpop_tum(vm, vd, vs2, vl); +} + +vuint8mf4_t test_vcpop_v_u8mf4_tum(vbool32_t vm, vuint8mf4_t vd, + vuint8mf4_t vs2, size_t vl) { + return __riscv_vcpop_tum(vm, vd, vs2, vl); +} + +vuint8mf2_t test_vcpop_v_u8mf2_tum(vbool16_t vm, vuint8mf2_t vd, + vuint8mf2_t vs2, size_t vl) { + return __riscv_vcpop_tum(vm, vd, vs2, vl); +} + +vuint8m1_t test_vcpop_v_u8m1_tum(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, + size_t vl) { + return __riscv_vcpop_tum(vm, vd, vs2, vl); +} + +vuint8m2_t test_vcpop_v_u8m2_tum(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, + size_t vl) { + return __riscv_vcpop_tum(vm, vd, vs2, vl); +} + +vuint8m4_t test_vcpop_v_u8m4_tum(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, + size_t vl) { + return __riscv_vcpop_tum(vm, vd, vs2, vl); +} + +vuint8m8_t test_vcpop_v_u8m8_tum(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, + size_t vl) { + return __riscv_vcpop_tum(vm, vd, vs2, vl); +} + +vuint16mf4_t test_vcpop_v_u16mf4_tum(vbool64_t vm, vuint16mf4_t vd, + vuint16mf4_t vs2, size_t vl) { + return __riscv_vcpop_tum(vm, vd, vs2, vl); +} + +vuint16mf2_t test_vcpop_v_u16mf2_tum(vbool32_t vm, vuint16mf2_t vd, + vuint16mf2_t vs2, size_t vl) { + return __riscv_vcpop_tum(vm, vd, vs2, vl); +} + +vuint16m1_t test_vcpop_v_u16m1_tum(vbool16_t vm, vuint16m1_t vd, + 
vuint16m1_t vs2, size_t vl) { + return __riscv_vcpop_tum(vm, vd, vs2, vl); +} + +vuint16m2_t test_vcpop_v_u16m2_tum(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, + size_t vl) { + return __riscv_vcpop_tum(vm, vd, vs2, vl); +} + +vuint16m4_t test_vcpop_v_u16m4_tum(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, + size_t vl) { + return __riscv_vcpop_tum(vm, vd, vs2, vl); +} + +vuint16m8_t test_vcpop_v_u16m8_tum(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, + size_t vl) { + return __riscv_vcpop_tum(vm, vd, vs2, vl); +} + +vuint32mf2_t test_vcpop_v_u32mf2_tum(vbool64_t vm, vuint32mf2_t vd, + vuint32mf2_t vs2, size_t vl) { + return __riscv_vcpop_tum(vm, vd, vs2, vl); +} + +vuint32m1_t test_vcpop_v_u32m1_tum(vbool32_t vm, vuint32m1_t vd, + vuint32m1_t vs2, size_t vl) { + return __riscv_vcpop_tum(vm, vd, vs2, vl); +} + +vuint32m2_t test_vcpop_v_u32m2_tum(vbool16_t vm, vuint32m2_t vd, + vuint32m2_t vs2, size_t vl) { + return __riscv_vcpop_tum(vm, vd, vs2, vl); +} + +vuint32m4_t test_vcpop_v_u32m4_tum(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, + size_t vl) { + return __riscv_vcpop_tum(vm, vd, vs2, vl); +} + +vuint32m8_t test_vcpop_v_u32m8_tum(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, + size_t vl) { + return __riscv_vcpop_tum(vm, vd, vs2, vl); +} + +vuint64m1_t test_vcpop_v_u64m1_tum(vbool64_t vm, vuint64m1_t vd, + vuint64m1_t vs2, size_t vl) { + return __riscv_vcpop_tum(vm, vd, vs2, vl); +} + +vuint64m2_t test_vcpop_v_u64m2_tum(vbool32_t vm, vuint64m2_t vd, + vuint64m2_t vs2, size_t vl) { + return __riscv_vcpop_tum(vm, vd, vs2, vl); +} + +vuint64m4_t test_vcpop_v_u64m4_tum(vbool16_t vm, vuint64m4_t vd, + vuint64m4_t vs2, size_t vl) { + return __riscv_vcpop_tum(vm, vd, vs2, vl); +} + +vuint64m8_t test_vcpop_v_u64m8_tum(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, + size_t vl) { + return __riscv_vcpop_tum(vm, vd, vs2, vl); +} + +vuint8mf8_t test_vcpop_v_u8mf8_tumu(vbool64_t vm, vuint8mf8_t vd, + vuint8mf8_t vs2, size_t vl) { + return __riscv_vcpop_tumu(vm, vd, vs2, vl); +} + +vuint8mf4_t test_vcpop_v_u8mf4_tumu(vbool32_t vm, vuint8mf4_t vd, + vuint8mf4_t vs2, size_t vl) { + return __riscv_vcpop_tumu(vm, vd, vs2, vl); +} + +vuint8mf2_t test_vcpop_v_u8mf2_tumu(vbool16_t vm, vuint8mf2_t vd, + vuint8mf2_t vs2, size_t vl) { + return __riscv_vcpop_tumu(vm, vd, vs2, vl); +} + +vuint8m1_t test_vcpop_v_u8m1_tumu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, + size_t vl) { + return __riscv_vcpop_tumu(vm, vd, vs2, vl); +} + +vuint8m2_t test_vcpop_v_u8m2_tumu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, + size_t vl) { + return __riscv_vcpop_tumu(vm, vd, vs2, vl); +} + +vuint8m4_t test_vcpop_v_u8m4_tumu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, + size_t vl) { + return __riscv_vcpop_tumu(vm, vd, vs2, vl); +} + +vuint8m8_t test_vcpop_v_u8m8_tumu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, + size_t vl) { + return __riscv_vcpop_tumu(vm, vd, vs2, vl); +} + +vuint16mf4_t test_vcpop_v_u16mf4_tumu(vbool64_t vm, vuint16mf4_t vd, + vuint16mf4_t vs2, size_t vl) { + return __riscv_vcpop_tumu(vm, vd, vs2, vl); +} + +vuint16mf2_t test_vcpop_v_u16mf2_tumu(vbool32_t vm, vuint16mf2_t vd, + vuint16mf2_t vs2, size_t vl) { + return __riscv_vcpop_tumu(vm, vd, vs2, vl); +} + +vuint16m1_t test_vcpop_v_u16m1_tumu(vbool16_t vm, vuint16m1_t vd, + vuint16m1_t vs2, size_t vl) { + return __riscv_vcpop_tumu(vm, vd, vs2, vl); +} + +vuint16m2_t test_vcpop_v_u16m2_tumu(vbool8_t vm, vuint16m2_t vd, + vuint16m2_t vs2, size_t vl) { + return __riscv_vcpop_tumu(vm, vd, vs2, vl); +} + +vuint16m4_t test_vcpop_v_u16m4_tumu(vbool4_t vm, vuint16m4_t 
vd, + vuint16m4_t vs2, size_t vl) { + return __riscv_vcpop_tumu(vm, vd, vs2, vl); +} + +vuint16m8_t test_vcpop_v_u16m8_tumu(vbool2_t vm, vuint16m8_t vd, + vuint16m8_t vs2, size_t vl) { + return __riscv_vcpop_tumu(vm, vd, vs2, vl); +} + +vuint32mf2_t test_vcpop_v_u32mf2_tumu(vbool64_t vm, vuint32mf2_t vd, + vuint32mf2_t vs2, size_t vl) { + return __riscv_vcpop_tumu(vm, vd, vs2, vl); +} + +vuint32m1_t test_vcpop_v_u32m1_tumu(vbool32_t vm, vuint32m1_t vd, + vuint32m1_t vs2, size_t vl) { + return __riscv_vcpop_tumu(vm, vd, vs2, vl); +} + +vuint32m2_t test_vcpop_v_u32m2_tumu(vbool16_t vm, vuint32m2_t vd, + vuint32m2_t vs2, size_t vl) { + return __riscv_vcpop_tumu(vm, vd, vs2, vl); +} + +vuint32m4_t test_vcpop_v_u32m4_tumu(vbool8_t vm, vuint32m4_t vd, + vuint32m4_t vs2, size_t vl) { + return __riscv_vcpop_tumu(vm, vd, vs2, vl); +} + +vuint32m8_t test_vcpop_v_u32m8_tumu(vbool4_t vm, vuint32m8_t vd, + vuint32m8_t vs2, size_t vl) { + return __riscv_vcpop_tumu(vm, vd, vs2, vl); +} + +vuint64m1_t test_vcpop_v_u64m1_tumu(vbool64_t vm, vuint64m1_t vd, + vuint64m1_t vs2, size_t vl) { + return __riscv_vcpop_tumu(vm, vd, vs2, vl); +} + +vuint64m2_t test_vcpop_v_u64m2_tumu(vbool32_t vm, vuint64m2_t vd, + vuint64m2_t vs2, size_t vl) { + return __riscv_vcpop_tumu(vm, vd, vs2, vl); +} + +vuint64m4_t test_vcpop_v_u64m4_tumu(vbool16_t vm, vuint64m4_t vd, + vuint64m4_t vs2, size_t vl) { + return __riscv_vcpop_tumu(vm, vd, vs2, vl); +} + +vuint64m8_t test_vcpop_v_u64m8_tumu(vbool8_t vm, vuint64m8_t vd, + vuint64m8_t vs2, size_t vl) { + return __riscv_vcpop_tumu(vm, vd, vs2, vl); +} + +vuint8mf8_t test_vcpop_v_u8mf8_mu(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, + size_t vl) { + return __riscv_vcpop_mu(vm, vd, vs2, vl); +} + +vuint8mf4_t test_vcpop_v_u8mf4_mu(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, + size_t vl) { + return __riscv_vcpop_mu(vm, vd, vs2, vl); +} + +vuint8mf2_t test_vcpop_v_u8mf2_mu(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, + size_t vl) { + return __riscv_vcpop_mu(vm, vd, vs2, vl); +} + +vuint8m1_t test_vcpop_v_u8m1_mu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, + size_t vl) { + return __riscv_vcpop_mu(vm, vd, vs2, vl); +} + +vuint8m2_t test_vcpop_v_u8m2_mu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, + size_t vl) { + return __riscv_vcpop_mu(vm, vd, vs2, vl); +} + +vuint8m4_t test_vcpop_v_u8m4_mu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, + size_t vl) { + return __riscv_vcpop_mu(vm, vd, vs2, vl); +} + +vuint8m8_t test_vcpop_v_u8m8_mu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, + size_t vl) { + return __riscv_vcpop_mu(vm, vd, vs2, vl); +} + +vuint16mf4_t test_vcpop_v_u16mf4_mu(vbool64_t vm, vuint16mf4_t vd, + vuint16mf4_t vs2, size_t vl) { + return __riscv_vcpop_mu(vm, vd, vs2, vl); +} + +vuint16mf2_t test_vcpop_v_u16mf2_mu(vbool32_t vm, vuint16mf2_t vd, + vuint16mf2_t vs2, size_t vl) { + return __riscv_vcpop_mu(vm, vd, vs2, vl); +} + +vuint16m1_t test_vcpop_v_u16m1_mu(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, + size_t vl) { + return __riscv_vcpop_mu(vm, vd, vs2, vl); +} + +vuint16m2_t test_vcpop_v_u16m2_mu(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, + size_t vl) { + return __riscv_vcpop_mu(vm, vd, vs2, vl); +} + +vuint16m4_t test_vcpop_v_u16m4_mu(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, + size_t vl) { + return __riscv_vcpop_mu(vm, vd, vs2, vl); +} + +vuint16m8_t test_vcpop_v_u16m8_mu(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, + size_t vl) { + return __riscv_vcpop_mu(vm, vd, vs2, vl); +} + +vuint32mf2_t test_vcpop_v_u32mf2_mu(vbool64_t vm, vuint32mf2_t vd, + vuint32mf2_t 
vs2, size_t vl) { + return __riscv_vcpop_mu(vm, vd, vs2, vl); +} + +vuint32m1_t test_vcpop_v_u32m1_mu(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, + size_t vl) { + return __riscv_vcpop_mu(vm, vd, vs2, vl); +} + +vuint32m2_t test_vcpop_v_u32m2_mu(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, + size_t vl) { + return __riscv_vcpop_mu(vm, vd, vs2, vl); +} + +vuint32m4_t test_vcpop_v_u32m4_mu(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, + size_t vl) { + return __riscv_vcpop_mu(vm, vd, vs2, vl); +} + +vuint32m8_t test_vcpop_v_u32m8_mu(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, + size_t vl) { + return __riscv_vcpop_mu(vm, vd, vs2, vl); +} + +vuint64m1_t test_vcpop_v_u64m1_mu(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, + size_t vl) { + return __riscv_vcpop_mu(vm, vd, vs2, vl); +} + +vuint64m2_t test_vcpop_v_u64m2_mu(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, + size_t vl) { + return __riscv_vcpop_mu(vm, vd, vs2, vl); +} + +vuint64m4_t test_vcpop_v_u64m4_mu(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, + size_t vl) { + return __riscv_vcpop_mu(vm, vd, vs2, vl); +} + +vuint64m8_t test_vcpop_v_u64m8_mu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, + size_t vl) { + return __riscv_vcpop_mu(vm, vd, vs2, vl); +} diff --git a/auto-generated/vector-crypto/policy_funcs/llvm-overloaded-tests/vctz.c b/auto-generated/vector-crypto/policy_funcs/llvm-overloaded-tests/vctz.c new file mode 100644 index 000000000..8cecc11d2 --- /dev/null +++ b/auto-generated/vector-crypto/policy_funcs/llvm-overloaded-tests/vctz.c @@ -0,0 +1,434 @@ +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zvl512b \ +// RUN: -target-feature +zvbb \ +// RUN: -target-feature +zvbc \ +// RUN: -target-feature +zvkg \ +// RUN: -target-feature +zvkned \ +// RUN: -target-feature +zvknhb \ +// RUN: -target-feature +zvksed \ +// RUN: -target-feature +zvksh -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include + +vuint8mf8_t test_vctz_v_u8mf8_tu(vuint8mf8_t vd, vuint8mf8_t vs2, size_t vl) { + return __riscv_vctz_tu(vd, vs2, vl); +} + +vuint8mf4_t test_vctz_v_u8mf4_tu(vuint8mf4_t vd, vuint8mf4_t vs2, size_t vl) { + return __riscv_vctz_tu(vd, vs2, vl); +} + +vuint8mf2_t test_vctz_v_u8mf2_tu(vuint8mf2_t vd, vuint8mf2_t vs2, size_t vl) { + return __riscv_vctz_tu(vd, vs2, vl); +} + +vuint8m1_t test_vctz_v_u8m1_tu(vuint8m1_t vd, vuint8m1_t vs2, size_t vl) { + return __riscv_vctz_tu(vd, vs2, vl); +} + +vuint8m2_t test_vctz_v_u8m2_tu(vuint8m2_t vd, vuint8m2_t vs2, size_t vl) { + return __riscv_vctz_tu(vd, vs2, vl); +} + +vuint8m4_t test_vctz_v_u8m4_tu(vuint8m4_t vd, vuint8m4_t vs2, size_t vl) { + return __riscv_vctz_tu(vd, vs2, vl); +} + +vuint8m8_t test_vctz_v_u8m8_tu(vuint8m8_t vd, vuint8m8_t vs2, size_t vl) { + return __riscv_vctz_tu(vd, vs2, vl); +} + +vuint16mf4_t test_vctz_v_u16mf4_tu(vuint16mf4_t vd, vuint16mf4_t vs2, + size_t vl) { + return __riscv_vctz_tu(vd, vs2, vl); +} + +vuint16mf2_t test_vctz_v_u16mf2_tu(vuint16mf2_t vd, vuint16mf2_t vs2, + size_t vl) { + return __riscv_vctz_tu(vd, vs2, vl); +} + +vuint16m1_t test_vctz_v_u16m1_tu(vuint16m1_t vd, vuint16m1_t vs2, size_t vl) { + return __riscv_vctz_tu(vd, vs2, vl); +} + +vuint16m2_t test_vctz_v_u16m2_tu(vuint16m2_t vd, vuint16m2_t vs2, size_t vl) { + return __riscv_vctz_tu(vd, vs2, vl); +} + +vuint16m4_t test_vctz_v_u16m4_tu(vuint16m4_t vd, vuint16m4_t vs2, size_t vl) { + return __riscv_vctz_tu(vd, vs2, vl); +} + +vuint16m8_t 
test_vctz_v_u16m8_tu(vuint16m8_t vd, vuint16m8_t vs2, size_t vl) { + return __riscv_vctz_tu(vd, vs2, vl); +} + +vuint32mf2_t test_vctz_v_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, + size_t vl) { + return __riscv_vctz_tu(vd, vs2, vl); +} + +vuint32m1_t test_vctz_v_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vctz_tu(vd, vs2, vl); +} + +vuint32m2_t test_vctz_v_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vctz_tu(vd, vs2, vl); +} + +vuint32m4_t test_vctz_v_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vctz_tu(vd, vs2, vl); +} + +vuint32m8_t test_vctz_v_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { + return __riscv_vctz_tu(vd, vs2, vl); +} + +vuint64m1_t test_vctz_v_u64m1_tu(vuint64m1_t vd, vuint64m1_t vs2, size_t vl) { + return __riscv_vctz_tu(vd, vs2, vl); +} + +vuint64m2_t test_vctz_v_u64m2_tu(vuint64m2_t vd, vuint64m2_t vs2, size_t vl) { + return __riscv_vctz_tu(vd, vs2, vl); +} + +vuint64m4_t test_vctz_v_u64m4_tu(vuint64m4_t vd, vuint64m4_t vs2, size_t vl) { + return __riscv_vctz_tu(vd, vs2, vl); +} + +vuint64m8_t test_vctz_v_u64m8_tu(vuint64m8_t vd, vuint64m8_t vs2, size_t vl) { + return __riscv_vctz_tu(vd, vs2, vl); +} + +vuint8mf8_t test_vctz_v_u8mf8_tum(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, + size_t vl) { + return __riscv_vctz_tum(vm, vd, vs2, vl); +} + +vuint8mf4_t test_vctz_v_u8mf4_tum(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, + size_t vl) { + return __riscv_vctz_tum(vm, vd, vs2, vl); +} + +vuint8mf2_t test_vctz_v_u8mf2_tum(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, + size_t vl) { + return __riscv_vctz_tum(vm, vd, vs2, vl); +} + +vuint8m1_t test_vctz_v_u8m1_tum(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, + size_t vl) { + return __riscv_vctz_tum(vm, vd, vs2, vl); +} + +vuint8m2_t test_vctz_v_u8m2_tum(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, + size_t vl) { + return __riscv_vctz_tum(vm, vd, vs2, vl); +} + +vuint8m4_t test_vctz_v_u8m4_tum(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, + size_t vl) { + return __riscv_vctz_tum(vm, vd, vs2, vl); +} + +vuint8m8_t test_vctz_v_u8m8_tum(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, + size_t vl) { + return __riscv_vctz_tum(vm, vd, vs2, vl); +} + +vuint16mf4_t test_vctz_v_u16mf4_tum(vbool64_t vm, vuint16mf4_t vd, + vuint16mf4_t vs2, size_t vl) { + return __riscv_vctz_tum(vm, vd, vs2, vl); +} + +vuint16mf2_t test_vctz_v_u16mf2_tum(vbool32_t vm, vuint16mf2_t vd, + vuint16mf2_t vs2, size_t vl) { + return __riscv_vctz_tum(vm, vd, vs2, vl); +} + +vuint16m1_t test_vctz_v_u16m1_tum(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, + size_t vl) { + return __riscv_vctz_tum(vm, vd, vs2, vl); +} + +vuint16m2_t test_vctz_v_u16m2_tum(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, + size_t vl) { + return __riscv_vctz_tum(vm, vd, vs2, vl); +} + +vuint16m4_t test_vctz_v_u16m4_tum(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, + size_t vl) { + return __riscv_vctz_tum(vm, vd, vs2, vl); +} + +vuint16m8_t test_vctz_v_u16m8_tum(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, + size_t vl) { + return __riscv_vctz_tum(vm, vd, vs2, vl); +} + +vuint32mf2_t test_vctz_v_u32mf2_tum(vbool64_t vm, vuint32mf2_t vd, + vuint32mf2_t vs2, size_t vl) { + return __riscv_vctz_tum(vm, vd, vs2, vl); +} + +vuint32m1_t test_vctz_v_u32m1_tum(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, + size_t vl) { + return __riscv_vctz_tum(vm, vd, vs2, vl); +} + +vuint32m2_t test_vctz_v_u32m2_tum(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, + size_t vl) { + return __riscv_vctz_tum(vm, vd, vs2, vl); 
+} + +vuint32m4_t test_vctz_v_u32m4_tum(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, + size_t vl) { + return __riscv_vctz_tum(vm, vd, vs2, vl); +} + +vuint32m8_t test_vctz_v_u32m8_tum(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, + size_t vl) { + return __riscv_vctz_tum(vm, vd, vs2, vl); +} + +vuint64m1_t test_vctz_v_u64m1_tum(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, + size_t vl) { + return __riscv_vctz_tum(vm, vd, vs2, vl); +} + +vuint64m2_t test_vctz_v_u64m2_tum(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, + size_t vl) { + return __riscv_vctz_tum(vm, vd, vs2, vl); +} + +vuint64m4_t test_vctz_v_u64m4_tum(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, + size_t vl) { + return __riscv_vctz_tum(vm, vd, vs2, vl); +} + +vuint64m8_t test_vctz_v_u64m8_tum(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, + size_t vl) { + return __riscv_vctz_tum(vm, vd, vs2, vl); +} + +vuint8mf8_t test_vctz_v_u8mf8_tumu(vbool64_t vm, vuint8mf8_t vd, + vuint8mf8_t vs2, size_t vl) { + return __riscv_vctz_tumu(vm, vd, vs2, vl); +} + +vuint8mf4_t test_vctz_v_u8mf4_tumu(vbool32_t vm, vuint8mf4_t vd, + vuint8mf4_t vs2, size_t vl) { + return __riscv_vctz_tumu(vm, vd, vs2, vl); +} + +vuint8mf2_t test_vctz_v_u8mf2_tumu(vbool16_t vm, vuint8mf2_t vd, + vuint8mf2_t vs2, size_t vl) { + return __riscv_vctz_tumu(vm, vd, vs2, vl); +} + +vuint8m1_t test_vctz_v_u8m1_tumu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, + size_t vl) { + return __riscv_vctz_tumu(vm, vd, vs2, vl); +} + +vuint8m2_t test_vctz_v_u8m2_tumu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, + size_t vl) { + return __riscv_vctz_tumu(vm, vd, vs2, vl); +} + +vuint8m4_t test_vctz_v_u8m4_tumu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, + size_t vl) { + return __riscv_vctz_tumu(vm, vd, vs2, vl); +} + +vuint8m8_t test_vctz_v_u8m8_tumu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, + size_t vl) { + return __riscv_vctz_tumu(vm, vd, vs2, vl); +} + +vuint16mf4_t test_vctz_v_u16mf4_tumu(vbool64_t vm, vuint16mf4_t vd, + vuint16mf4_t vs2, size_t vl) { + return __riscv_vctz_tumu(vm, vd, vs2, vl); +} + +vuint16mf2_t test_vctz_v_u16mf2_tumu(vbool32_t vm, vuint16mf2_t vd, + vuint16mf2_t vs2, size_t vl) { + return __riscv_vctz_tumu(vm, vd, vs2, vl); +} + +vuint16m1_t test_vctz_v_u16m1_tumu(vbool16_t vm, vuint16m1_t vd, + vuint16m1_t vs2, size_t vl) { + return __riscv_vctz_tumu(vm, vd, vs2, vl); +} + +vuint16m2_t test_vctz_v_u16m2_tumu(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, + size_t vl) { + return __riscv_vctz_tumu(vm, vd, vs2, vl); +} + +vuint16m4_t test_vctz_v_u16m4_tumu(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, + size_t vl) { + return __riscv_vctz_tumu(vm, vd, vs2, vl); +} + +vuint16m8_t test_vctz_v_u16m8_tumu(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, + size_t vl) { + return __riscv_vctz_tumu(vm, vd, vs2, vl); +} + +vuint32mf2_t test_vctz_v_u32mf2_tumu(vbool64_t vm, vuint32mf2_t vd, + vuint32mf2_t vs2, size_t vl) { + return __riscv_vctz_tumu(vm, vd, vs2, vl); +} + +vuint32m1_t test_vctz_v_u32m1_tumu(vbool32_t vm, vuint32m1_t vd, + vuint32m1_t vs2, size_t vl) { + return __riscv_vctz_tumu(vm, vd, vs2, vl); +} + +vuint32m2_t test_vctz_v_u32m2_tumu(vbool16_t vm, vuint32m2_t vd, + vuint32m2_t vs2, size_t vl) { + return __riscv_vctz_tumu(vm, vd, vs2, vl); +} + +vuint32m4_t test_vctz_v_u32m4_tumu(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, + size_t vl) { + return __riscv_vctz_tumu(vm, vd, vs2, vl); +} + +vuint32m8_t test_vctz_v_u32m8_tumu(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, + size_t vl) { + return __riscv_vctz_tumu(vm, vd, vs2, vl); +} + +vuint64m1_t 
test_vctz_v_u64m1_tumu(vbool64_t vm, vuint64m1_t vd, + vuint64m1_t vs2, size_t vl) { + return __riscv_vctz_tumu(vm, vd, vs2, vl); +} + +vuint64m2_t test_vctz_v_u64m2_tumu(vbool32_t vm, vuint64m2_t vd, + vuint64m2_t vs2, size_t vl) { + return __riscv_vctz_tumu(vm, vd, vs2, vl); +} + +vuint64m4_t test_vctz_v_u64m4_tumu(vbool16_t vm, vuint64m4_t vd, + vuint64m4_t vs2, size_t vl) { + return __riscv_vctz_tumu(vm, vd, vs2, vl); +} + +vuint64m8_t test_vctz_v_u64m8_tumu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, + size_t vl) { + return __riscv_vctz_tumu(vm, vd, vs2, vl); +} + +vuint8mf8_t test_vctz_v_u8mf8_mu(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, + size_t vl) { + return __riscv_vctz_mu(vm, vd, vs2, vl); +} + +vuint8mf4_t test_vctz_v_u8mf4_mu(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, + size_t vl) { + return __riscv_vctz_mu(vm, vd, vs2, vl); +} + +vuint8mf2_t test_vctz_v_u8mf2_mu(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, + size_t vl) { + return __riscv_vctz_mu(vm, vd, vs2, vl); +} + +vuint8m1_t test_vctz_v_u8m1_mu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, + size_t vl) { + return __riscv_vctz_mu(vm, vd, vs2, vl); +} + +vuint8m2_t test_vctz_v_u8m2_mu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, + size_t vl) { + return __riscv_vctz_mu(vm, vd, vs2, vl); +} + +vuint8m4_t test_vctz_v_u8m4_mu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, + size_t vl) { + return __riscv_vctz_mu(vm, vd, vs2, vl); +} + +vuint8m8_t test_vctz_v_u8m8_mu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, + size_t vl) { + return __riscv_vctz_mu(vm, vd, vs2, vl); +} + +vuint16mf4_t test_vctz_v_u16mf4_mu(vbool64_t vm, vuint16mf4_t vd, + vuint16mf4_t vs2, size_t vl) { + return __riscv_vctz_mu(vm, vd, vs2, vl); +} + +vuint16mf2_t test_vctz_v_u16mf2_mu(vbool32_t vm, vuint16mf2_t vd, + vuint16mf2_t vs2, size_t vl) { + return __riscv_vctz_mu(vm, vd, vs2, vl); +} + +vuint16m1_t test_vctz_v_u16m1_mu(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, + size_t vl) { + return __riscv_vctz_mu(vm, vd, vs2, vl); +} + +vuint16m2_t test_vctz_v_u16m2_mu(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, + size_t vl) { + return __riscv_vctz_mu(vm, vd, vs2, vl); +} + +vuint16m4_t test_vctz_v_u16m4_mu(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, + size_t vl) { + return __riscv_vctz_mu(vm, vd, vs2, vl); +} + +vuint16m8_t test_vctz_v_u16m8_mu(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, + size_t vl) { + return __riscv_vctz_mu(vm, vd, vs2, vl); +} + +vuint32mf2_t test_vctz_v_u32mf2_mu(vbool64_t vm, vuint32mf2_t vd, + vuint32mf2_t vs2, size_t vl) { + return __riscv_vctz_mu(vm, vd, vs2, vl); +} + +vuint32m1_t test_vctz_v_u32m1_mu(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, + size_t vl) { + return __riscv_vctz_mu(vm, vd, vs2, vl); +} + +vuint32m2_t test_vctz_v_u32m2_mu(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, + size_t vl) { + return __riscv_vctz_mu(vm, vd, vs2, vl); +} + +vuint32m4_t test_vctz_v_u32m4_mu(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, + size_t vl) { + return __riscv_vctz_mu(vm, vd, vs2, vl); +} + +vuint32m8_t test_vctz_v_u32m8_mu(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, + size_t vl) { + return __riscv_vctz_mu(vm, vd, vs2, vl); +} + +vuint64m1_t test_vctz_v_u64m1_mu(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, + size_t vl) { + return __riscv_vctz_mu(vm, vd, vs2, vl); +} + +vuint64m2_t test_vctz_v_u64m2_mu(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, + size_t vl) { + return __riscv_vctz_mu(vm, vd, vs2, vl); +} + +vuint64m4_t test_vctz_v_u64m4_mu(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, + size_t vl) { + return 
__riscv_vctz_mu(vm, vd, vs2, vl); +} + +vuint64m8_t test_vctz_v_u64m8_mu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, + size_t vl) { + return __riscv_vctz_mu(vm, vd, vs2, vl); +} diff --git a/auto-generated/vector-crypto/policy_funcs/llvm-overloaded-tests/vghsh.c b/auto-generated/vector-crypto/policy_funcs/llvm-overloaded-tests/vghsh.c new file mode 100644 index 000000000..5a1670759 --- /dev/null +++ b/auto-generated/vector-crypto/policy_funcs/llvm-overloaded-tests/vghsh.c @@ -0,0 +1,38 @@ +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zvl512b \ +// RUN: -target-feature +zvbb \ +// RUN: -target-feature +zvbc \ +// RUN: -target-feature +zvkg \ +// RUN: -target-feature +zvkned \ +// RUN: -target-feature +zvknhb \ +// RUN: -target-feature +zvksed \ +// RUN: -target-feature +zvksh -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include + +vuint32mf2_t test_vghsh_vv_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, + vuint32mf2_t vs1, size_t vl) { + return __riscv_vghsh_tu(vd, vs2, vs1, vl); +} + +vuint32m1_t test_vghsh_vv_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, + vuint32m1_t vs1, size_t vl) { + return __riscv_vghsh_tu(vd, vs2, vs1, vl); +} + +vuint32m2_t test_vghsh_vv_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, + vuint32m2_t vs1, size_t vl) { + return __riscv_vghsh_tu(vd, vs2, vs1, vl); +} + +vuint32m4_t test_vghsh_vv_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, + vuint32m4_t vs1, size_t vl) { + return __riscv_vghsh_tu(vd, vs2, vs1, vl); +} + +vuint32m8_t test_vghsh_vv_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, + vuint32m8_t vs1, size_t vl) { + return __riscv_vghsh_tu(vd, vs2, vs1, vl); +} diff --git a/auto-generated/vector-crypto/policy_funcs/llvm-overloaded-tests/vgmul.c b/auto-generated/vector-crypto/policy_funcs/llvm-overloaded-tests/vgmul.c new file mode 100644 index 000000000..995625243 --- /dev/null +++ b/auto-generated/vector-crypto/policy_funcs/llvm-overloaded-tests/vgmul.c @@ -0,0 +1,34 @@ +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zvl512b \ +// RUN: -target-feature +zvbb \ +// RUN: -target-feature +zvbc \ +// RUN: -target-feature +zvkg \ +// RUN: -target-feature +zvkned \ +// RUN: -target-feature +zvknhb \ +// RUN: -target-feature +zvksed \ +// RUN: -target-feature +zvksh -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include + +vuint32mf2_t test_vgmul_vv_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, + size_t vl) { + return __riscv_vgmul_tu(vd, vs2, vl); +} + +vuint32m1_t test_vgmul_vv_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vgmul_tu(vd, vs2, vl); +} + +vuint32m2_t test_vgmul_vv_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vgmul_tu(vd, vs2, vl); +} + +vuint32m4_t test_vgmul_vv_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vgmul_tu(vd, vs2, vl); +} + +vuint32m8_t test_vgmul_vv_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { + return __riscv_vgmul_tu(vd, vs2, vl); +} diff --git a/auto-generated/vector-crypto/policy_funcs/llvm-overloaded-tests/vrev8.c b/auto-generated/vector-crypto/policy_funcs/llvm-overloaded-tests/vrev8.c new file mode 100644 index 000000000..62c1e3e1e --- /dev/null +++ b/auto-generated/vector-crypto/policy_funcs/llvm-overloaded-tests/vrev8.c @@ -0,0 +1,434 @@ +// REQUIRES: 
riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zvl512b \ +// RUN: -target-feature +zvbb \ +// RUN: -target-feature +zvbc \ +// RUN: -target-feature +zvkg \ +// RUN: -target-feature +zvkned \ +// RUN: -target-feature +zvknhb \ +// RUN: -target-feature +zvksed \ +// RUN: -target-feature +zvksh -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include + +vuint8mf8_t test_vrev8_v_u8mf8_tu(vuint8mf8_t vd, vuint8mf8_t vs2, size_t vl) { + return __riscv_vrev8_tu(vd, vs2, vl); +} + +vuint8mf4_t test_vrev8_v_u8mf4_tu(vuint8mf4_t vd, vuint8mf4_t vs2, size_t vl) { + return __riscv_vrev8_tu(vd, vs2, vl); +} + +vuint8mf2_t test_vrev8_v_u8mf2_tu(vuint8mf2_t vd, vuint8mf2_t vs2, size_t vl) { + return __riscv_vrev8_tu(vd, vs2, vl); +} + +vuint8m1_t test_vrev8_v_u8m1_tu(vuint8m1_t vd, vuint8m1_t vs2, size_t vl) { + return __riscv_vrev8_tu(vd, vs2, vl); +} + +vuint8m2_t test_vrev8_v_u8m2_tu(vuint8m2_t vd, vuint8m2_t vs2, size_t vl) { + return __riscv_vrev8_tu(vd, vs2, vl); +} + +vuint8m4_t test_vrev8_v_u8m4_tu(vuint8m4_t vd, vuint8m4_t vs2, size_t vl) { + return __riscv_vrev8_tu(vd, vs2, vl); +} + +vuint8m8_t test_vrev8_v_u8m8_tu(vuint8m8_t vd, vuint8m8_t vs2, size_t vl) { + return __riscv_vrev8_tu(vd, vs2, vl); +} + +vuint16mf4_t test_vrev8_v_u16mf4_tu(vuint16mf4_t vd, vuint16mf4_t vs2, + size_t vl) { + return __riscv_vrev8_tu(vd, vs2, vl); +} + +vuint16mf2_t test_vrev8_v_u16mf2_tu(vuint16mf2_t vd, vuint16mf2_t vs2, + size_t vl) { + return __riscv_vrev8_tu(vd, vs2, vl); +} + +vuint16m1_t test_vrev8_v_u16m1_tu(vuint16m1_t vd, vuint16m1_t vs2, size_t vl) { + return __riscv_vrev8_tu(vd, vs2, vl); +} + +vuint16m2_t test_vrev8_v_u16m2_tu(vuint16m2_t vd, vuint16m2_t vs2, size_t vl) { + return __riscv_vrev8_tu(vd, vs2, vl); +} + +vuint16m4_t test_vrev8_v_u16m4_tu(vuint16m4_t vd, vuint16m4_t vs2, size_t vl) { + return __riscv_vrev8_tu(vd, vs2, vl); +} + +vuint16m8_t test_vrev8_v_u16m8_tu(vuint16m8_t vd, vuint16m8_t vs2, size_t vl) { + return __riscv_vrev8_tu(vd, vs2, vl); +} + +vuint32mf2_t test_vrev8_v_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, + size_t vl) { + return __riscv_vrev8_tu(vd, vs2, vl); +} + +vuint32m1_t test_vrev8_v_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vrev8_tu(vd, vs2, vl); +} + +vuint32m2_t test_vrev8_v_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vrev8_tu(vd, vs2, vl); +} + +vuint32m4_t test_vrev8_v_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vrev8_tu(vd, vs2, vl); +} + +vuint32m8_t test_vrev8_v_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { + return __riscv_vrev8_tu(vd, vs2, vl); +} + +vuint64m1_t test_vrev8_v_u64m1_tu(vuint64m1_t vd, vuint64m1_t vs2, size_t vl) { + return __riscv_vrev8_tu(vd, vs2, vl); +} + +vuint64m2_t test_vrev8_v_u64m2_tu(vuint64m2_t vd, vuint64m2_t vs2, size_t vl) { + return __riscv_vrev8_tu(vd, vs2, vl); +} + +vuint64m4_t test_vrev8_v_u64m4_tu(vuint64m4_t vd, vuint64m4_t vs2, size_t vl) { + return __riscv_vrev8_tu(vd, vs2, vl); +} + +vuint64m8_t test_vrev8_v_u64m8_tu(vuint64m8_t vd, vuint64m8_t vs2, size_t vl) { + return __riscv_vrev8_tu(vd, vs2, vl); +} + +vuint8mf8_t test_vrev8_v_u8mf8_tum(vbool64_t vm, vuint8mf8_t vd, + vuint8mf8_t vs2, size_t vl) { + return __riscv_vrev8_tum(vm, vd, vs2, vl); +} + +vuint8mf4_t test_vrev8_v_u8mf4_tum(vbool32_t vm, vuint8mf4_t vd, + vuint8mf4_t vs2, size_t vl) { + return __riscv_vrev8_tum(vm, vd, vs2, vl); +} + 
+vuint8mf2_t test_vrev8_v_u8mf2_tum(vbool16_t vm, vuint8mf2_t vd, + vuint8mf2_t vs2, size_t vl) { + return __riscv_vrev8_tum(vm, vd, vs2, vl); +} + +vuint8m1_t test_vrev8_v_u8m1_tum(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, + size_t vl) { + return __riscv_vrev8_tum(vm, vd, vs2, vl); +} + +vuint8m2_t test_vrev8_v_u8m2_tum(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, + size_t vl) { + return __riscv_vrev8_tum(vm, vd, vs2, vl); +} + +vuint8m4_t test_vrev8_v_u8m4_tum(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, + size_t vl) { + return __riscv_vrev8_tum(vm, vd, vs2, vl); +} + +vuint8m8_t test_vrev8_v_u8m8_tum(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, + size_t vl) { + return __riscv_vrev8_tum(vm, vd, vs2, vl); +} + +vuint16mf4_t test_vrev8_v_u16mf4_tum(vbool64_t vm, vuint16mf4_t vd, + vuint16mf4_t vs2, size_t vl) { + return __riscv_vrev8_tum(vm, vd, vs2, vl); +} + +vuint16mf2_t test_vrev8_v_u16mf2_tum(vbool32_t vm, vuint16mf2_t vd, + vuint16mf2_t vs2, size_t vl) { + return __riscv_vrev8_tum(vm, vd, vs2, vl); +} + +vuint16m1_t test_vrev8_v_u16m1_tum(vbool16_t vm, vuint16m1_t vd, + vuint16m1_t vs2, size_t vl) { + return __riscv_vrev8_tum(vm, vd, vs2, vl); +} + +vuint16m2_t test_vrev8_v_u16m2_tum(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, + size_t vl) { + return __riscv_vrev8_tum(vm, vd, vs2, vl); +} + +vuint16m4_t test_vrev8_v_u16m4_tum(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, + size_t vl) { + return __riscv_vrev8_tum(vm, vd, vs2, vl); +} + +vuint16m8_t test_vrev8_v_u16m8_tum(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, + size_t vl) { + return __riscv_vrev8_tum(vm, vd, vs2, vl); +} + +vuint32mf2_t test_vrev8_v_u32mf2_tum(vbool64_t vm, vuint32mf2_t vd, + vuint32mf2_t vs2, size_t vl) { + return __riscv_vrev8_tum(vm, vd, vs2, vl); +} + +vuint32m1_t test_vrev8_v_u32m1_tum(vbool32_t vm, vuint32m1_t vd, + vuint32m1_t vs2, size_t vl) { + return __riscv_vrev8_tum(vm, vd, vs2, vl); +} + +vuint32m2_t test_vrev8_v_u32m2_tum(vbool16_t vm, vuint32m2_t vd, + vuint32m2_t vs2, size_t vl) { + return __riscv_vrev8_tum(vm, vd, vs2, vl); +} + +vuint32m4_t test_vrev8_v_u32m4_tum(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, + size_t vl) { + return __riscv_vrev8_tum(vm, vd, vs2, vl); +} + +vuint32m8_t test_vrev8_v_u32m8_tum(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, + size_t vl) { + return __riscv_vrev8_tum(vm, vd, vs2, vl); +} + +vuint64m1_t test_vrev8_v_u64m1_tum(vbool64_t vm, vuint64m1_t vd, + vuint64m1_t vs2, size_t vl) { + return __riscv_vrev8_tum(vm, vd, vs2, vl); +} + +vuint64m2_t test_vrev8_v_u64m2_tum(vbool32_t vm, vuint64m2_t vd, + vuint64m2_t vs2, size_t vl) { + return __riscv_vrev8_tum(vm, vd, vs2, vl); +} + +vuint64m4_t test_vrev8_v_u64m4_tum(vbool16_t vm, vuint64m4_t vd, + vuint64m4_t vs2, size_t vl) { + return __riscv_vrev8_tum(vm, vd, vs2, vl); +} + +vuint64m8_t test_vrev8_v_u64m8_tum(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, + size_t vl) { + return __riscv_vrev8_tum(vm, vd, vs2, vl); +} + +vuint8mf8_t test_vrev8_v_u8mf8_tumu(vbool64_t vm, vuint8mf8_t vd, + vuint8mf8_t vs2, size_t vl) { + return __riscv_vrev8_tumu(vm, vd, vs2, vl); +} + +vuint8mf4_t test_vrev8_v_u8mf4_tumu(vbool32_t vm, vuint8mf4_t vd, + vuint8mf4_t vs2, size_t vl) { + return __riscv_vrev8_tumu(vm, vd, vs2, vl); +} + +vuint8mf2_t test_vrev8_v_u8mf2_tumu(vbool16_t vm, vuint8mf2_t vd, + vuint8mf2_t vs2, size_t vl) { + return __riscv_vrev8_tumu(vm, vd, vs2, vl); +} + +vuint8m1_t test_vrev8_v_u8m1_tumu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, + size_t vl) { + return __riscv_vrev8_tumu(vm, vd, vs2, vl); +} + +vuint8m2_t 
test_vrev8_v_u8m2_tumu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, + size_t vl) { + return __riscv_vrev8_tumu(vm, vd, vs2, vl); +} + +vuint8m4_t test_vrev8_v_u8m4_tumu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, + size_t vl) { + return __riscv_vrev8_tumu(vm, vd, vs2, vl); +} + +vuint8m8_t test_vrev8_v_u8m8_tumu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, + size_t vl) { + return __riscv_vrev8_tumu(vm, vd, vs2, vl); +} + +vuint16mf4_t test_vrev8_v_u16mf4_tumu(vbool64_t vm, vuint16mf4_t vd, + vuint16mf4_t vs2, size_t vl) { + return __riscv_vrev8_tumu(vm, vd, vs2, vl); +} + +vuint16mf2_t test_vrev8_v_u16mf2_tumu(vbool32_t vm, vuint16mf2_t vd, + vuint16mf2_t vs2, size_t vl) { + return __riscv_vrev8_tumu(vm, vd, vs2, vl); +} + +vuint16m1_t test_vrev8_v_u16m1_tumu(vbool16_t vm, vuint16m1_t vd, + vuint16m1_t vs2, size_t vl) { + return __riscv_vrev8_tumu(vm, vd, vs2, vl); +} + +vuint16m2_t test_vrev8_v_u16m2_tumu(vbool8_t vm, vuint16m2_t vd, + vuint16m2_t vs2, size_t vl) { + return __riscv_vrev8_tumu(vm, vd, vs2, vl); +} + +vuint16m4_t test_vrev8_v_u16m4_tumu(vbool4_t vm, vuint16m4_t vd, + vuint16m4_t vs2, size_t vl) { + return __riscv_vrev8_tumu(vm, vd, vs2, vl); +} + +vuint16m8_t test_vrev8_v_u16m8_tumu(vbool2_t vm, vuint16m8_t vd, + vuint16m8_t vs2, size_t vl) { + return __riscv_vrev8_tumu(vm, vd, vs2, vl); +} + +vuint32mf2_t test_vrev8_v_u32mf2_tumu(vbool64_t vm, vuint32mf2_t vd, + vuint32mf2_t vs2, size_t vl) { + return __riscv_vrev8_tumu(vm, vd, vs2, vl); +} + +vuint32m1_t test_vrev8_v_u32m1_tumu(vbool32_t vm, vuint32m1_t vd, + vuint32m1_t vs2, size_t vl) { + return __riscv_vrev8_tumu(vm, vd, vs2, vl); +} + +vuint32m2_t test_vrev8_v_u32m2_tumu(vbool16_t vm, vuint32m2_t vd, + vuint32m2_t vs2, size_t vl) { + return __riscv_vrev8_tumu(vm, vd, vs2, vl); +} + +vuint32m4_t test_vrev8_v_u32m4_tumu(vbool8_t vm, vuint32m4_t vd, + vuint32m4_t vs2, size_t vl) { + return __riscv_vrev8_tumu(vm, vd, vs2, vl); +} + +vuint32m8_t test_vrev8_v_u32m8_tumu(vbool4_t vm, vuint32m8_t vd, + vuint32m8_t vs2, size_t vl) { + return __riscv_vrev8_tumu(vm, vd, vs2, vl); +} + +vuint64m1_t test_vrev8_v_u64m1_tumu(vbool64_t vm, vuint64m1_t vd, + vuint64m1_t vs2, size_t vl) { + return __riscv_vrev8_tumu(vm, vd, vs2, vl); +} + +vuint64m2_t test_vrev8_v_u64m2_tumu(vbool32_t vm, vuint64m2_t vd, + vuint64m2_t vs2, size_t vl) { + return __riscv_vrev8_tumu(vm, vd, vs2, vl); +} + +vuint64m4_t test_vrev8_v_u64m4_tumu(vbool16_t vm, vuint64m4_t vd, + vuint64m4_t vs2, size_t vl) { + return __riscv_vrev8_tumu(vm, vd, vs2, vl); +} + +vuint64m8_t test_vrev8_v_u64m8_tumu(vbool8_t vm, vuint64m8_t vd, + vuint64m8_t vs2, size_t vl) { + return __riscv_vrev8_tumu(vm, vd, vs2, vl); +} + +vuint8mf8_t test_vrev8_v_u8mf8_mu(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, + size_t vl) { + return __riscv_vrev8_mu(vm, vd, vs2, vl); +} + +vuint8mf4_t test_vrev8_v_u8mf4_mu(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, + size_t vl) { + return __riscv_vrev8_mu(vm, vd, vs2, vl); +} + +vuint8mf2_t test_vrev8_v_u8mf2_mu(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, + size_t vl) { + return __riscv_vrev8_mu(vm, vd, vs2, vl); +} + +vuint8m1_t test_vrev8_v_u8m1_mu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, + size_t vl) { + return __riscv_vrev8_mu(vm, vd, vs2, vl); +} + +vuint8m2_t test_vrev8_v_u8m2_mu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, + size_t vl) { + return __riscv_vrev8_mu(vm, vd, vs2, vl); +} + +vuint8m4_t test_vrev8_v_u8m4_mu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, + size_t vl) { + return __riscv_vrev8_mu(vm, vd, vs2, vl); +} + +vuint8m8_t 
test_vrev8_v_u8m8_mu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, + size_t vl) { + return __riscv_vrev8_mu(vm, vd, vs2, vl); +} + +vuint16mf4_t test_vrev8_v_u16mf4_mu(vbool64_t vm, vuint16mf4_t vd, + vuint16mf4_t vs2, size_t vl) { + return __riscv_vrev8_mu(vm, vd, vs2, vl); +} + +vuint16mf2_t test_vrev8_v_u16mf2_mu(vbool32_t vm, vuint16mf2_t vd, + vuint16mf2_t vs2, size_t vl) { + return __riscv_vrev8_mu(vm, vd, vs2, vl); +} + +vuint16m1_t test_vrev8_v_u16m1_mu(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, + size_t vl) { + return __riscv_vrev8_mu(vm, vd, vs2, vl); +} + +vuint16m2_t test_vrev8_v_u16m2_mu(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, + size_t vl) { + return __riscv_vrev8_mu(vm, vd, vs2, vl); +} + +vuint16m4_t test_vrev8_v_u16m4_mu(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, + size_t vl) { + return __riscv_vrev8_mu(vm, vd, vs2, vl); +} + +vuint16m8_t test_vrev8_v_u16m8_mu(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, + size_t vl) { + return __riscv_vrev8_mu(vm, vd, vs2, vl); +} + +vuint32mf2_t test_vrev8_v_u32mf2_mu(vbool64_t vm, vuint32mf2_t vd, + vuint32mf2_t vs2, size_t vl) { + return __riscv_vrev8_mu(vm, vd, vs2, vl); +} + +vuint32m1_t test_vrev8_v_u32m1_mu(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, + size_t vl) { + return __riscv_vrev8_mu(vm, vd, vs2, vl); +} + +vuint32m2_t test_vrev8_v_u32m2_mu(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, + size_t vl) { + return __riscv_vrev8_mu(vm, vd, vs2, vl); +} + +vuint32m4_t test_vrev8_v_u32m4_mu(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, + size_t vl) { + return __riscv_vrev8_mu(vm, vd, vs2, vl); +} + +vuint32m8_t test_vrev8_v_u32m8_mu(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, + size_t vl) { + return __riscv_vrev8_mu(vm, vd, vs2, vl); +} + +vuint64m1_t test_vrev8_v_u64m1_mu(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, + size_t vl) { + return __riscv_vrev8_mu(vm, vd, vs2, vl); +} + +vuint64m2_t test_vrev8_v_u64m2_mu(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, + size_t vl) { + return __riscv_vrev8_mu(vm, vd, vs2, vl); +} + +vuint64m4_t test_vrev8_v_u64m4_mu(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, + size_t vl) { + return __riscv_vrev8_mu(vm, vd, vs2, vl); +} + +vuint64m8_t test_vrev8_v_u64m8_mu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, + size_t vl) { + return __riscv_vrev8_mu(vm, vd, vs2, vl); +} diff --git a/auto-generated/vector-crypto/policy_funcs/llvm-overloaded-tests/vrol.c b/auto-generated/vector-crypto/policy_funcs/llvm-overloaded-tests/vrol.c new file mode 100644 index 000000000..6617d9830 --- /dev/null +++ b/auto-generated/vector-crypto/policy_funcs/llvm-overloaded-tests/vrol.c @@ -0,0 +1,926 @@ +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zvl512b \ +// RUN: -target-feature +zvbb \ +// RUN: -target-feature +zvbc \ +// RUN: -target-feature +zvkg \ +// RUN: -target-feature +zvkned \ +// RUN: -target-feature +zvknhb \ +// RUN: -target-feature +zvksed \ +// RUN: -target-feature +zvksh -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include + +vuint8mf8_t test_vrol_vv_u8mf8_tu(vuint8mf8_t vd, vuint8mf8_t vs2, + vuint8mf8_t vs1, size_t vl) { + return __riscv_vrol_tu(vd, vs2, vs1, vl); +} + +vuint8mf8_t test_vrol_vx_u8mf8_tu(vuint8mf8_t vd, vuint8mf8_t vs2, size_t rs1, + size_t vl) { + return __riscv_vrol_tu(vd, vs2, rs1, vl); +} + +vuint8mf4_t test_vrol_vv_u8mf4_tu(vuint8mf4_t vd, vuint8mf4_t vs2, + vuint8mf4_t vs1, size_t vl) { + return 
__riscv_vrol_tu(vd, vs2, vs1, vl); +} + +vuint8mf4_t test_vrol_vx_u8mf4_tu(vuint8mf4_t vd, vuint8mf4_t vs2, size_t rs1, + size_t vl) { + return __riscv_vrol_tu(vd, vs2, rs1, vl); +} + +vuint8mf2_t test_vrol_vv_u8mf2_tu(vuint8mf2_t vd, vuint8mf2_t vs2, + vuint8mf2_t vs1, size_t vl) { + return __riscv_vrol_tu(vd, vs2, vs1, vl); +} + +vuint8mf2_t test_vrol_vx_u8mf2_tu(vuint8mf2_t vd, vuint8mf2_t vs2, size_t rs1, + size_t vl) { + return __riscv_vrol_tu(vd, vs2, rs1, vl); +} + +vuint8m1_t test_vrol_vv_u8m1_tu(vuint8m1_t vd, vuint8m1_t vs2, vuint8m1_t vs1, + size_t vl) { + return __riscv_vrol_tu(vd, vs2, vs1, vl); +} + +vuint8m1_t test_vrol_vx_u8m1_tu(vuint8m1_t vd, vuint8m1_t vs2, size_t rs1, + size_t vl) { + return __riscv_vrol_tu(vd, vs2, rs1, vl); +} + +vuint8m2_t test_vrol_vv_u8m2_tu(vuint8m2_t vd, vuint8m2_t vs2, vuint8m2_t vs1, + size_t vl) { + return __riscv_vrol_tu(vd, vs2, vs1, vl); +} + +vuint8m2_t test_vrol_vx_u8m2_tu(vuint8m2_t vd, vuint8m2_t vs2, size_t rs1, + size_t vl) { + return __riscv_vrol_tu(vd, vs2, rs1, vl); +} + +vuint8m4_t test_vrol_vv_u8m4_tu(vuint8m4_t vd, vuint8m4_t vs2, vuint8m4_t vs1, + size_t vl) { + return __riscv_vrol_tu(vd, vs2, vs1, vl); +} + +vuint8m4_t test_vrol_vx_u8m4_tu(vuint8m4_t vd, vuint8m4_t vs2, size_t rs1, + size_t vl) { + return __riscv_vrol_tu(vd, vs2, rs1, vl); +} + +vuint8m8_t test_vrol_vv_u8m8_tu(vuint8m8_t vd, vuint8m8_t vs2, vuint8m8_t vs1, + size_t vl) { + return __riscv_vrol_tu(vd, vs2, vs1, vl); +} + +vuint8m8_t test_vrol_vx_u8m8_tu(vuint8m8_t vd, vuint8m8_t vs2, size_t rs1, + size_t vl) { + return __riscv_vrol_tu(vd, vs2, rs1, vl); +} + +vuint16mf4_t test_vrol_vv_u16mf4_tu(vuint16mf4_t vd, vuint16mf4_t vs2, + vuint16mf4_t vs1, size_t vl) { + return __riscv_vrol_tu(vd, vs2, vs1, vl); +} + +vuint16mf4_t test_vrol_vx_u16mf4_tu(vuint16mf4_t vd, vuint16mf4_t vs2, + size_t rs1, size_t vl) { + return __riscv_vrol_tu(vd, vs2, rs1, vl); +} + +vuint16mf2_t test_vrol_vv_u16mf2_tu(vuint16mf2_t vd, vuint16mf2_t vs2, + vuint16mf2_t vs1, size_t vl) { + return __riscv_vrol_tu(vd, vs2, vs1, vl); +} + +vuint16mf2_t test_vrol_vx_u16mf2_tu(vuint16mf2_t vd, vuint16mf2_t vs2, + size_t rs1, size_t vl) { + return __riscv_vrol_tu(vd, vs2, rs1, vl); +} + +vuint16m1_t test_vrol_vv_u16m1_tu(vuint16m1_t vd, vuint16m1_t vs2, + vuint16m1_t vs1, size_t vl) { + return __riscv_vrol_tu(vd, vs2, vs1, vl); +} + +vuint16m1_t test_vrol_vx_u16m1_tu(vuint16m1_t vd, vuint16m1_t vs2, size_t rs1, + size_t vl) { + return __riscv_vrol_tu(vd, vs2, rs1, vl); +} + +vuint16m2_t test_vrol_vv_u16m2_tu(vuint16m2_t vd, vuint16m2_t vs2, + vuint16m2_t vs1, size_t vl) { + return __riscv_vrol_tu(vd, vs2, vs1, vl); +} + +vuint16m2_t test_vrol_vx_u16m2_tu(vuint16m2_t vd, vuint16m2_t vs2, size_t rs1, + size_t vl) { + return __riscv_vrol_tu(vd, vs2, rs1, vl); +} + +vuint16m4_t test_vrol_vv_u16m4_tu(vuint16m4_t vd, vuint16m4_t vs2, + vuint16m4_t vs1, size_t vl) { + return __riscv_vrol_tu(vd, vs2, vs1, vl); +} + +vuint16m4_t test_vrol_vx_u16m4_tu(vuint16m4_t vd, vuint16m4_t vs2, size_t rs1, + size_t vl) { + return __riscv_vrol_tu(vd, vs2, rs1, vl); +} + +vuint16m8_t test_vrol_vv_u16m8_tu(vuint16m8_t vd, vuint16m8_t vs2, + vuint16m8_t vs1, size_t vl) { + return __riscv_vrol_tu(vd, vs2, vs1, vl); +} + +vuint16m8_t test_vrol_vx_u16m8_tu(vuint16m8_t vd, vuint16m8_t vs2, size_t rs1, + size_t vl) { + return __riscv_vrol_tu(vd, vs2, rs1, vl); +} + +vuint32mf2_t test_vrol_vv_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, + vuint32mf2_t vs1, size_t vl) { + return __riscv_vrol_tu(vd, vs2, vs1, vl); +} + 
+vuint32mf2_t test_vrol_vx_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, + size_t rs1, size_t vl) { + return __riscv_vrol_tu(vd, vs2, rs1, vl); +} + +vuint32m1_t test_vrol_vv_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, + vuint32m1_t vs1, size_t vl) { + return __riscv_vrol_tu(vd, vs2, vs1, vl); +} + +vuint32m1_t test_vrol_vx_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, size_t rs1, + size_t vl) { + return __riscv_vrol_tu(vd, vs2, rs1, vl); +} + +vuint32m2_t test_vrol_vv_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, + vuint32m2_t vs1, size_t vl) { + return __riscv_vrol_tu(vd, vs2, vs1, vl); +} + +vuint32m2_t test_vrol_vx_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, size_t rs1, + size_t vl) { + return __riscv_vrol_tu(vd, vs2, rs1, vl); +} + +vuint32m4_t test_vrol_vv_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, + vuint32m4_t vs1, size_t vl) { + return __riscv_vrol_tu(vd, vs2, vs1, vl); +} + +vuint32m4_t test_vrol_vx_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, size_t rs1, + size_t vl) { + return __riscv_vrol_tu(vd, vs2, rs1, vl); +} + +vuint32m8_t test_vrol_vv_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, + vuint32m8_t vs1, size_t vl) { + return __riscv_vrol_tu(vd, vs2, vs1, vl); +} + +vuint32m8_t test_vrol_vx_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, size_t rs1, + size_t vl) { + return __riscv_vrol_tu(vd, vs2, rs1, vl); +} + +vuint64m1_t test_vrol_vv_u64m1_tu(vuint64m1_t vd, vuint64m1_t vs2, + vuint64m1_t vs1, size_t vl) { + return __riscv_vrol_tu(vd, vs2, vs1, vl); +} + +vuint64m1_t test_vrol_vx_u64m1_tu(vuint64m1_t vd, vuint64m1_t vs2, size_t rs1, + size_t vl) { + return __riscv_vrol_tu(vd, vs2, rs1, vl); +} + +vuint64m2_t test_vrol_vv_u64m2_tu(vuint64m2_t vd, vuint64m2_t vs2, + vuint64m2_t vs1, size_t vl) { + return __riscv_vrol_tu(vd, vs2, vs1, vl); +} + +vuint64m2_t test_vrol_vx_u64m2_tu(vuint64m2_t vd, vuint64m2_t vs2, size_t rs1, + size_t vl) { + return __riscv_vrol_tu(vd, vs2, rs1, vl); +} + +vuint64m4_t test_vrol_vv_u64m4_tu(vuint64m4_t vd, vuint64m4_t vs2, + vuint64m4_t vs1, size_t vl) { + return __riscv_vrol_tu(vd, vs2, vs1, vl); +} + +vuint64m4_t test_vrol_vx_u64m4_tu(vuint64m4_t vd, vuint64m4_t vs2, size_t rs1, + size_t vl) { + return __riscv_vrol_tu(vd, vs2, rs1, vl); +} + +vuint64m8_t test_vrol_vv_u64m8_tu(vuint64m8_t vd, vuint64m8_t vs2, + vuint64m8_t vs1, size_t vl) { + return __riscv_vrol_tu(vd, vs2, vs1, vl); +} + +vuint64m8_t test_vrol_vx_u64m8_tu(vuint64m8_t vd, vuint64m8_t vs2, size_t rs1, + size_t vl) { + return __riscv_vrol_tu(vd, vs2, rs1, vl); +} + +vuint8mf8_t test_vrol_vv_u8mf8_tum(vbool64_t vm, vuint8mf8_t vd, + vuint8mf8_t vs2, vuint8mf8_t vs1, + size_t vl) { + return __riscv_vrol_tum(vm, vd, vs2, vs1, vl); +} + +vuint8mf8_t test_vrol_vx_u8mf8_tum(vbool64_t vm, vuint8mf8_t vd, + vuint8mf8_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_tum(vm, vd, vs2, rs1, vl); +} + +vuint8mf4_t test_vrol_vv_u8mf4_tum(vbool32_t vm, vuint8mf4_t vd, + vuint8mf4_t vs2, vuint8mf4_t vs1, + size_t vl) { + return __riscv_vrol_tum(vm, vd, vs2, vs1, vl); +} + +vuint8mf4_t test_vrol_vx_u8mf4_tum(vbool32_t vm, vuint8mf4_t vd, + vuint8mf4_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_tum(vm, vd, vs2, rs1, vl); +} + +vuint8mf2_t test_vrol_vv_u8mf2_tum(vbool16_t vm, vuint8mf2_t vd, + vuint8mf2_t vs2, vuint8mf2_t vs1, + size_t vl) { + return __riscv_vrol_tum(vm, vd, vs2, vs1, vl); +} + +vuint8mf2_t test_vrol_vx_u8mf2_tum(vbool16_t vm, vuint8mf2_t vd, + vuint8mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_tum(vm, vd, vs2, rs1, vl); +} + +vuint8m1_t test_vrol_vv_u8m1_tum(vbool8_t vm, vuint8m1_t 
vd, vuint8m1_t vs2, + vuint8m1_t vs1, size_t vl) { + return __riscv_vrol_tum(vm, vd, vs2, vs1, vl); +} + +vuint8m1_t test_vrol_vx_u8m1_tum(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, + size_t rs1, size_t vl) { + return __riscv_vrol_tum(vm, vd, vs2, rs1, vl); +} + +vuint8m2_t test_vrol_vv_u8m2_tum(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, + vuint8m2_t vs1, size_t vl) { + return __riscv_vrol_tum(vm, vd, vs2, vs1, vl); +} + +vuint8m2_t test_vrol_vx_u8m2_tum(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, + size_t rs1, size_t vl) { + return __riscv_vrol_tum(vm, vd, vs2, rs1, vl); +} + +vuint8m4_t test_vrol_vv_u8m4_tum(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, + vuint8m4_t vs1, size_t vl) { + return __riscv_vrol_tum(vm, vd, vs2, vs1, vl); +} + +vuint8m4_t test_vrol_vx_u8m4_tum(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, + size_t rs1, size_t vl) { + return __riscv_vrol_tum(vm, vd, vs2, rs1, vl); +} + +vuint8m8_t test_vrol_vv_u8m8_tum(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, + vuint8m8_t vs1, size_t vl) { + return __riscv_vrol_tum(vm, vd, vs2, vs1, vl); +} + +vuint8m8_t test_vrol_vx_u8m8_tum(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, + size_t rs1, size_t vl) { + return __riscv_vrol_tum(vm, vd, vs2, rs1, vl); +} + +vuint16mf4_t test_vrol_vv_u16mf4_tum(vbool64_t vm, vuint16mf4_t vd, + vuint16mf4_t vs2, vuint16mf4_t vs1, + size_t vl) { + return __riscv_vrol_tum(vm, vd, vs2, vs1, vl); +} + +vuint16mf4_t test_vrol_vx_u16mf4_tum(vbool64_t vm, vuint16mf4_t vd, + vuint16mf4_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_tum(vm, vd, vs2, rs1, vl); +} + +vuint16mf2_t test_vrol_vv_u16mf2_tum(vbool32_t vm, vuint16mf2_t vd, + vuint16mf2_t vs2, vuint16mf2_t vs1, + size_t vl) { + return __riscv_vrol_tum(vm, vd, vs2, vs1, vl); +} + +vuint16mf2_t test_vrol_vx_u16mf2_tum(vbool32_t vm, vuint16mf2_t vd, + vuint16mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_tum(vm, vd, vs2, rs1, vl); +} + +vuint16m1_t test_vrol_vv_u16m1_tum(vbool16_t vm, vuint16m1_t vd, + vuint16m1_t vs2, vuint16m1_t vs1, + size_t vl) { + return __riscv_vrol_tum(vm, vd, vs2, vs1, vl); +} + +vuint16m1_t test_vrol_vx_u16m1_tum(vbool16_t vm, vuint16m1_t vd, + vuint16m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_tum(vm, vd, vs2, rs1, vl); +} + +vuint16m2_t test_vrol_vv_u16m2_tum(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, + vuint16m2_t vs1, size_t vl) { + return __riscv_vrol_tum(vm, vd, vs2, vs1, vl); +} + +vuint16m2_t test_vrol_vx_u16m2_tum(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, + size_t rs1, size_t vl) { + return __riscv_vrol_tum(vm, vd, vs2, rs1, vl); +} + +vuint16m4_t test_vrol_vv_u16m4_tum(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, + vuint16m4_t vs1, size_t vl) { + return __riscv_vrol_tum(vm, vd, vs2, vs1, vl); +} + +vuint16m4_t test_vrol_vx_u16m4_tum(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, + size_t rs1, size_t vl) { + return __riscv_vrol_tum(vm, vd, vs2, rs1, vl); +} + +vuint16m8_t test_vrol_vv_u16m8_tum(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, + vuint16m8_t vs1, size_t vl) { + return __riscv_vrol_tum(vm, vd, vs2, vs1, vl); +} + +vuint16m8_t test_vrol_vx_u16m8_tum(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, + size_t rs1, size_t vl) { + return __riscv_vrol_tum(vm, vd, vs2, rs1, vl); +} + +vuint32mf2_t test_vrol_vv_u32mf2_tum(vbool64_t vm, vuint32mf2_t vd, + vuint32mf2_t vs2, vuint32mf2_t vs1, + size_t vl) { + return __riscv_vrol_tum(vm, vd, vs2, vs1, vl); +} + +vuint32mf2_t test_vrol_vx_u32mf2_tum(vbool64_t vm, vuint32mf2_t vd, + vuint32mf2_t vs2, size_t rs1, size_t vl) { + return 
__riscv_vrol_tum(vm, vd, vs2, rs1, vl); +} + +vuint32m1_t test_vrol_vv_u32m1_tum(vbool32_t vm, vuint32m1_t vd, + vuint32m1_t vs2, vuint32m1_t vs1, + size_t vl) { + return __riscv_vrol_tum(vm, vd, vs2, vs1, vl); +} + +vuint32m1_t test_vrol_vx_u32m1_tum(vbool32_t vm, vuint32m1_t vd, + vuint32m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_tum(vm, vd, vs2, rs1, vl); +} + +vuint32m2_t test_vrol_vv_u32m2_tum(vbool16_t vm, vuint32m2_t vd, + vuint32m2_t vs2, vuint32m2_t vs1, + size_t vl) { + return __riscv_vrol_tum(vm, vd, vs2, vs1, vl); +} + +vuint32m2_t test_vrol_vx_u32m2_tum(vbool16_t vm, vuint32m2_t vd, + vuint32m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_tum(vm, vd, vs2, rs1, vl); +} + +vuint32m4_t test_vrol_vv_u32m4_tum(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, + vuint32m4_t vs1, size_t vl) { + return __riscv_vrol_tum(vm, vd, vs2, vs1, vl); +} + +vuint32m4_t test_vrol_vx_u32m4_tum(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, + size_t rs1, size_t vl) { + return __riscv_vrol_tum(vm, vd, vs2, rs1, vl); +} + +vuint32m8_t test_vrol_vv_u32m8_tum(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, + vuint32m8_t vs1, size_t vl) { + return __riscv_vrol_tum(vm, vd, vs2, vs1, vl); +} + +vuint32m8_t test_vrol_vx_u32m8_tum(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, + size_t rs1, size_t vl) { + return __riscv_vrol_tum(vm, vd, vs2, rs1, vl); +} + +vuint64m1_t test_vrol_vv_u64m1_tum(vbool64_t vm, vuint64m1_t vd, + vuint64m1_t vs2, vuint64m1_t vs1, + size_t vl) { + return __riscv_vrol_tum(vm, vd, vs2, vs1, vl); +} + +vuint64m1_t test_vrol_vx_u64m1_tum(vbool64_t vm, vuint64m1_t vd, + vuint64m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_tum(vm, vd, vs2, rs1, vl); +} + +vuint64m2_t test_vrol_vv_u64m2_tum(vbool32_t vm, vuint64m2_t vd, + vuint64m2_t vs2, vuint64m2_t vs1, + size_t vl) { + return __riscv_vrol_tum(vm, vd, vs2, vs1, vl); +} + +vuint64m2_t test_vrol_vx_u64m2_tum(vbool32_t vm, vuint64m2_t vd, + vuint64m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_tum(vm, vd, vs2, rs1, vl); +} + +vuint64m4_t test_vrol_vv_u64m4_tum(vbool16_t vm, vuint64m4_t vd, + vuint64m4_t vs2, vuint64m4_t vs1, + size_t vl) { + return __riscv_vrol_tum(vm, vd, vs2, vs1, vl); +} + +vuint64m4_t test_vrol_vx_u64m4_tum(vbool16_t vm, vuint64m4_t vd, + vuint64m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_tum(vm, vd, vs2, rs1, vl); +} + +vuint64m8_t test_vrol_vv_u64m8_tum(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, + vuint64m8_t vs1, size_t vl) { + return __riscv_vrol_tum(vm, vd, vs2, vs1, vl); +} + +vuint64m8_t test_vrol_vx_u64m8_tum(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, + size_t rs1, size_t vl) { + return __riscv_vrol_tum(vm, vd, vs2, rs1, vl); +} + +vuint8mf8_t test_vrol_vv_u8mf8_tumu(vbool64_t vm, vuint8mf8_t vd, + vuint8mf8_t vs2, vuint8mf8_t vs1, + size_t vl) { + return __riscv_vrol_tumu(vm, vd, vs2, vs1, vl); +} + +vuint8mf8_t test_vrol_vx_u8mf8_tumu(vbool64_t vm, vuint8mf8_t vd, + vuint8mf8_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_tumu(vm, vd, vs2, rs1, vl); +} + +vuint8mf4_t test_vrol_vv_u8mf4_tumu(vbool32_t vm, vuint8mf4_t vd, + vuint8mf4_t vs2, vuint8mf4_t vs1, + size_t vl) { + return __riscv_vrol_tumu(vm, vd, vs2, vs1, vl); +} + +vuint8mf4_t test_vrol_vx_u8mf4_tumu(vbool32_t vm, vuint8mf4_t vd, + vuint8mf4_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_tumu(vm, vd, vs2, rs1, vl); +} + +vuint8mf2_t test_vrol_vv_u8mf2_tumu(vbool16_t vm, vuint8mf2_t vd, + vuint8mf2_t vs2, vuint8mf2_t vs1, + size_t vl) { + return __riscv_vrol_tumu(vm, vd, vs2, vs1, 
vl); +} + +vuint8mf2_t test_vrol_vx_u8mf2_tumu(vbool16_t vm, vuint8mf2_t vd, + vuint8mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_tumu(vm, vd, vs2, rs1, vl); +} + +vuint8m1_t test_vrol_vv_u8m1_tumu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, + vuint8m1_t vs1, size_t vl) { + return __riscv_vrol_tumu(vm, vd, vs2, vs1, vl); +} + +vuint8m1_t test_vrol_vx_u8m1_tumu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, + size_t rs1, size_t vl) { + return __riscv_vrol_tumu(vm, vd, vs2, rs1, vl); +} + +vuint8m2_t test_vrol_vv_u8m2_tumu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, + vuint8m2_t vs1, size_t vl) { + return __riscv_vrol_tumu(vm, vd, vs2, vs1, vl); +} + +vuint8m2_t test_vrol_vx_u8m2_tumu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, + size_t rs1, size_t vl) { + return __riscv_vrol_tumu(vm, vd, vs2, rs1, vl); +} + +vuint8m4_t test_vrol_vv_u8m4_tumu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, + vuint8m4_t vs1, size_t vl) { + return __riscv_vrol_tumu(vm, vd, vs2, vs1, vl); +} + +vuint8m4_t test_vrol_vx_u8m4_tumu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, + size_t rs1, size_t vl) { + return __riscv_vrol_tumu(vm, vd, vs2, rs1, vl); +} + +vuint8m8_t test_vrol_vv_u8m8_tumu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, + vuint8m8_t vs1, size_t vl) { + return __riscv_vrol_tumu(vm, vd, vs2, vs1, vl); +} + +vuint8m8_t test_vrol_vx_u8m8_tumu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, + size_t rs1, size_t vl) { + return __riscv_vrol_tumu(vm, vd, vs2, rs1, vl); +} + +vuint16mf4_t test_vrol_vv_u16mf4_tumu(vbool64_t vm, vuint16mf4_t vd, + vuint16mf4_t vs2, vuint16mf4_t vs1, + size_t vl) { + return __riscv_vrol_tumu(vm, vd, vs2, vs1, vl); +} + +vuint16mf4_t test_vrol_vx_u16mf4_tumu(vbool64_t vm, vuint16mf4_t vd, + vuint16mf4_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_tumu(vm, vd, vs2, rs1, vl); +} + +vuint16mf2_t test_vrol_vv_u16mf2_tumu(vbool32_t vm, vuint16mf2_t vd, + vuint16mf2_t vs2, vuint16mf2_t vs1, + size_t vl) { + return __riscv_vrol_tumu(vm, vd, vs2, vs1, vl); +} + +vuint16mf2_t test_vrol_vx_u16mf2_tumu(vbool32_t vm, vuint16mf2_t vd, + vuint16mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_tumu(vm, vd, vs2, rs1, vl); +} + +vuint16m1_t test_vrol_vv_u16m1_tumu(vbool16_t vm, vuint16m1_t vd, + vuint16m1_t vs2, vuint16m1_t vs1, + size_t vl) { + return __riscv_vrol_tumu(vm, vd, vs2, vs1, vl); +} + +vuint16m1_t test_vrol_vx_u16m1_tumu(vbool16_t vm, vuint16m1_t vd, + vuint16m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_tumu(vm, vd, vs2, rs1, vl); +} + +vuint16m2_t test_vrol_vv_u16m2_tumu(vbool8_t vm, vuint16m2_t vd, + vuint16m2_t vs2, vuint16m2_t vs1, + size_t vl) { + return __riscv_vrol_tumu(vm, vd, vs2, vs1, vl); +} + +vuint16m2_t test_vrol_vx_u16m2_tumu(vbool8_t vm, vuint16m2_t vd, + vuint16m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_tumu(vm, vd, vs2, rs1, vl); +} + +vuint16m4_t test_vrol_vv_u16m4_tumu(vbool4_t vm, vuint16m4_t vd, + vuint16m4_t vs2, vuint16m4_t vs1, + size_t vl) { + return __riscv_vrol_tumu(vm, vd, vs2, vs1, vl); +} + +vuint16m4_t test_vrol_vx_u16m4_tumu(vbool4_t vm, vuint16m4_t vd, + vuint16m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_tumu(vm, vd, vs2, rs1, vl); +} + +vuint16m8_t test_vrol_vv_u16m8_tumu(vbool2_t vm, vuint16m8_t vd, + vuint16m8_t vs2, vuint16m8_t vs1, + size_t vl) { + return __riscv_vrol_tumu(vm, vd, vs2, vs1, vl); +} + +vuint16m8_t test_vrol_vx_u16m8_tumu(vbool2_t vm, vuint16m8_t vd, + vuint16m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_tumu(vm, vd, vs2, rs1, vl); +} + +vuint32mf2_t 
test_vrol_vv_u32mf2_tumu(vbool64_t vm, vuint32mf2_t vd, + vuint32mf2_t vs2, vuint32mf2_t vs1, + size_t vl) { + return __riscv_vrol_tumu(vm, vd, vs2, vs1, vl); +} + +vuint32mf2_t test_vrol_vx_u32mf2_tumu(vbool64_t vm, vuint32mf2_t vd, + vuint32mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_tumu(vm, vd, vs2, rs1, vl); +} + +vuint32m1_t test_vrol_vv_u32m1_tumu(vbool32_t vm, vuint32m1_t vd, + vuint32m1_t vs2, vuint32m1_t vs1, + size_t vl) { + return __riscv_vrol_tumu(vm, vd, vs2, vs1, vl); +} + +vuint32m1_t test_vrol_vx_u32m1_tumu(vbool32_t vm, vuint32m1_t vd, + vuint32m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_tumu(vm, vd, vs2, rs1, vl); +} + +vuint32m2_t test_vrol_vv_u32m2_tumu(vbool16_t vm, vuint32m2_t vd, + vuint32m2_t vs2, vuint32m2_t vs1, + size_t vl) { + return __riscv_vrol_tumu(vm, vd, vs2, vs1, vl); +} + +vuint32m2_t test_vrol_vx_u32m2_tumu(vbool16_t vm, vuint32m2_t vd, + vuint32m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_tumu(vm, vd, vs2, rs1, vl); +} + +vuint32m4_t test_vrol_vv_u32m4_tumu(vbool8_t vm, vuint32m4_t vd, + vuint32m4_t vs2, vuint32m4_t vs1, + size_t vl) { + return __riscv_vrol_tumu(vm, vd, vs2, vs1, vl); +} + +vuint32m4_t test_vrol_vx_u32m4_tumu(vbool8_t vm, vuint32m4_t vd, + vuint32m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_tumu(vm, vd, vs2, rs1, vl); +} + +vuint32m8_t test_vrol_vv_u32m8_tumu(vbool4_t vm, vuint32m8_t vd, + vuint32m8_t vs2, vuint32m8_t vs1, + size_t vl) { + return __riscv_vrol_tumu(vm, vd, vs2, vs1, vl); +} + +vuint32m8_t test_vrol_vx_u32m8_tumu(vbool4_t vm, vuint32m8_t vd, + vuint32m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_tumu(vm, vd, vs2, rs1, vl); +} + +vuint64m1_t test_vrol_vv_u64m1_tumu(vbool64_t vm, vuint64m1_t vd, + vuint64m1_t vs2, vuint64m1_t vs1, + size_t vl) { + return __riscv_vrol_tumu(vm, vd, vs2, vs1, vl); +} + +vuint64m1_t test_vrol_vx_u64m1_tumu(vbool64_t vm, vuint64m1_t vd, + vuint64m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_tumu(vm, vd, vs2, rs1, vl); +} + +vuint64m2_t test_vrol_vv_u64m2_tumu(vbool32_t vm, vuint64m2_t vd, + vuint64m2_t vs2, vuint64m2_t vs1, + size_t vl) { + return __riscv_vrol_tumu(vm, vd, vs2, vs1, vl); +} + +vuint64m2_t test_vrol_vx_u64m2_tumu(vbool32_t vm, vuint64m2_t vd, + vuint64m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_tumu(vm, vd, vs2, rs1, vl); +} + +vuint64m4_t test_vrol_vv_u64m4_tumu(vbool16_t vm, vuint64m4_t vd, + vuint64m4_t vs2, vuint64m4_t vs1, + size_t vl) { + return __riscv_vrol_tumu(vm, vd, vs2, vs1, vl); +} + +vuint64m4_t test_vrol_vx_u64m4_tumu(vbool16_t vm, vuint64m4_t vd, + vuint64m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_tumu(vm, vd, vs2, rs1, vl); +} + +vuint64m8_t test_vrol_vv_u64m8_tumu(vbool8_t vm, vuint64m8_t vd, + vuint64m8_t vs2, vuint64m8_t vs1, + size_t vl) { + return __riscv_vrol_tumu(vm, vd, vs2, vs1, vl); +} + +vuint64m8_t test_vrol_vx_u64m8_tumu(vbool8_t vm, vuint64m8_t vd, + vuint64m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_tumu(vm, vd, vs2, rs1, vl); +} + +vuint8mf8_t test_vrol_vv_u8mf8_mu(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, + vuint8mf8_t vs1, size_t vl) { + return __riscv_vrol_mu(vm, vd, vs2, vs1, vl); +} + +vuint8mf8_t test_vrol_vx_u8mf8_mu(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, + size_t rs1, size_t vl) { + return __riscv_vrol_mu(vm, vd, vs2, rs1, vl); +} + +vuint8mf4_t test_vrol_vv_u8mf4_mu(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, + vuint8mf4_t vs1, size_t vl) { + return __riscv_vrol_mu(vm, vd, vs2, vs1, vl); +} + +vuint8mf4_t 
test_vrol_vx_u8mf4_mu(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, + size_t rs1, size_t vl) { + return __riscv_vrol_mu(vm, vd, vs2, rs1, vl); +} + +vuint8mf2_t test_vrol_vv_u8mf2_mu(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, + vuint8mf2_t vs1, size_t vl) { + return __riscv_vrol_mu(vm, vd, vs2, vs1, vl); +} + +vuint8mf2_t test_vrol_vx_u8mf2_mu(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, + size_t rs1, size_t vl) { + return __riscv_vrol_mu(vm, vd, vs2, rs1, vl); +} + +vuint8m1_t test_vrol_vv_u8m1_mu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, + vuint8m1_t vs1, size_t vl) { + return __riscv_vrol_mu(vm, vd, vs2, vs1, vl); +} + +vuint8m1_t test_vrol_vx_u8m1_mu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, + size_t rs1, size_t vl) { + return __riscv_vrol_mu(vm, vd, vs2, rs1, vl); +} + +vuint8m2_t test_vrol_vv_u8m2_mu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, + vuint8m2_t vs1, size_t vl) { + return __riscv_vrol_mu(vm, vd, vs2, vs1, vl); +} + +vuint8m2_t test_vrol_vx_u8m2_mu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, + size_t rs1, size_t vl) { + return __riscv_vrol_mu(vm, vd, vs2, rs1, vl); +} + +vuint8m4_t test_vrol_vv_u8m4_mu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, + vuint8m4_t vs1, size_t vl) { + return __riscv_vrol_mu(vm, vd, vs2, vs1, vl); +} + +vuint8m4_t test_vrol_vx_u8m4_mu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, + size_t rs1, size_t vl) { + return __riscv_vrol_mu(vm, vd, vs2, rs1, vl); +} + +vuint8m8_t test_vrol_vv_u8m8_mu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, + vuint8m8_t vs1, size_t vl) { + return __riscv_vrol_mu(vm, vd, vs2, vs1, vl); +} + +vuint8m8_t test_vrol_vx_u8m8_mu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, + size_t rs1, size_t vl) { + return __riscv_vrol_mu(vm, vd, vs2, rs1, vl); +} + +vuint16mf4_t test_vrol_vv_u16mf4_mu(vbool64_t vm, vuint16mf4_t vd, + vuint16mf4_t vs2, vuint16mf4_t vs1, + size_t vl) { + return __riscv_vrol_mu(vm, vd, vs2, vs1, vl); +} + +vuint16mf4_t test_vrol_vx_u16mf4_mu(vbool64_t vm, vuint16mf4_t vd, + vuint16mf4_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_mu(vm, vd, vs2, rs1, vl); +} + +vuint16mf2_t test_vrol_vv_u16mf2_mu(vbool32_t vm, vuint16mf2_t vd, + vuint16mf2_t vs2, vuint16mf2_t vs1, + size_t vl) { + return __riscv_vrol_mu(vm, vd, vs2, vs1, vl); +} + +vuint16mf2_t test_vrol_vx_u16mf2_mu(vbool32_t vm, vuint16mf2_t vd, + vuint16mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_mu(vm, vd, vs2, rs1, vl); +} + +vuint16m1_t test_vrol_vv_u16m1_mu(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, + vuint16m1_t vs1, size_t vl) { + return __riscv_vrol_mu(vm, vd, vs2, vs1, vl); +} + +vuint16m1_t test_vrol_vx_u16m1_mu(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, + size_t rs1, size_t vl) { + return __riscv_vrol_mu(vm, vd, vs2, rs1, vl); +} + +vuint16m2_t test_vrol_vv_u16m2_mu(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, + vuint16m2_t vs1, size_t vl) { + return __riscv_vrol_mu(vm, vd, vs2, vs1, vl); +} + +vuint16m2_t test_vrol_vx_u16m2_mu(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, + size_t rs1, size_t vl) { + return __riscv_vrol_mu(vm, vd, vs2, rs1, vl); +} + +vuint16m4_t test_vrol_vv_u16m4_mu(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, + vuint16m4_t vs1, size_t vl) { + return __riscv_vrol_mu(vm, vd, vs2, vs1, vl); +} + +vuint16m4_t test_vrol_vx_u16m4_mu(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, + size_t rs1, size_t vl) { + return __riscv_vrol_mu(vm, vd, vs2, rs1, vl); +} + +vuint16m8_t test_vrol_vv_u16m8_mu(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, + vuint16m8_t vs1, size_t vl) { + return __riscv_vrol_mu(vm, 
vd, vs2, vs1, vl); +} + +vuint16m8_t test_vrol_vx_u16m8_mu(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, + size_t rs1, size_t vl) { + return __riscv_vrol_mu(vm, vd, vs2, rs1, vl); +} + +vuint32mf2_t test_vrol_vv_u32mf2_mu(vbool64_t vm, vuint32mf2_t vd, + vuint32mf2_t vs2, vuint32mf2_t vs1, + size_t vl) { + return __riscv_vrol_mu(vm, vd, vs2, vs1, vl); +} + +vuint32mf2_t test_vrol_vx_u32mf2_mu(vbool64_t vm, vuint32mf2_t vd, + vuint32mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_mu(vm, vd, vs2, rs1, vl); +} + +vuint32m1_t test_vrol_vv_u32m1_mu(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, + vuint32m1_t vs1, size_t vl) { + return __riscv_vrol_mu(vm, vd, vs2, vs1, vl); +} + +vuint32m1_t test_vrol_vx_u32m1_mu(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, + size_t rs1, size_t vl) { + return __riscv_vrol_mu(vm, vd, vs2, rs1, vl); +} + +vuint32m2_t test_vrol_vv_u32m2_mu(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, + vuint32m2_t vs1, size_t vl) { + return __riscv_vrol_mu(vm, vd, vs2, vs1, vl); +} + +vuint32m2_t test_vrol_vx_u32m2_mu(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, + size_t rs1, size_t vl) { + return __riscv_vrol_mu(vm, vd, vs2, rs1, vl); +} + +vuint32m4_t test_vrol_vv_u32m4_mu(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, + vuint32m4_t vs1, size_t vl) { + return __riscv_vrol_mu(vm, vd, vs2, vs1, vl); +} + +vuint32m4_t test_vrol_vx_u32m4_mu(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, + size_t rs1, size_t vl) { + return __riscv_vrol_mu(vm, vd, vs2, rs1, vl); +} + +vuint32m8_t test_vrol_vv_u32m8_mu(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, + vuint32m8_t vs1, size_t vl) { + return __riscv_vrol_mu(vm, vd, vs2, vs1, vl); +} + +vuint32m8_t test_vrol_vx_u32m8_mu(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, + size_t rs1, size_t vl) { + return __riscv_vrol_mu(vm, vd, vs2, rs1, vl); +} + +vuint64m1_t test_vrol_vv_u64m1_mu(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, + vuint64m1_t vs1, size_t vl) { + return __riscv_vrol_mu(vm, vd, vs2, vs1, vl); +} + +vuint64m1_t test_vrol_vx_u64m1_mu(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, + size_t rs1, size_t vl) { + return __riscv_vrol_mu(vm, vd, vs2, rs1, vl); +} + +vuint64m2_t test_vrol_vv_u64m2_mu(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, + vuint64m2_t vs1, size_t vl) { + return __riscv_vrol_mu(vm, vd, vs2, vs1, vl); +} + +vuint64m2_t test_vrol_vx_u64m2_mu(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, + size_t rs1, size_t vl) { + return __riscv_vrol_mu(vm, vd, vs2, rs1, vl); +} + +vuint64m4_t test_vrol_vv_u64m4_mu(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, + vuint64m4_t vs1, size_t vl) { + return __riscv_vrol_mu(vm, vd, vs2, vs1, vl); +} + +vuint64m4_t test_vrol_vx_u64m4_mu(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, + size_t rs1, size_t vl) { + return __riscv_vrol_mu(vm, vd, vs2, rs1, vl); +} + +vuint64m8_t test_vrol_vv_u64m8_mu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, + vuint64m8_t vs1, size_t vl) { + return __riscv_vrol_mu(vm, vd, vs2, vs1, vl); +} + +vuint64m8_t test_vrol_vx_u64m8_mu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, + size_t rs1, size_t vl) { + return __riscv_vrol_mu(vm, vd, vs2, rs1, vl); +} diff --git a/auto-generated/vector-crypto/policy_funcs/llvm-overloaded-tests/vror.c b/auto-generated/vector-crypto/policy_funcs/llvm-overloaded-tests/vror.c new file mode 100644 index 000000000..0fb6a2d3f --- /dev/null +++ b/auto-generated/vector-crypto/policy_funcs/llvm-overloaded-tests/vror.c @@ -0,0 +1,926 @@ +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature 
+v -target-feature +zvl512b \ +// RUN: -target-feature +zvbb \ +// RUN: -target-feature +zvbc \ +// RUN: -target-feature +zvkg \ +// RUN: -target-feature +zvkned \ +// RUN: -target-feature +zvknhb \ +// RUN: -target-feature +zvksed \ +// RUN: -target-feature +zvksh -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include + +vuint8mf8_t test_vror_vv_u8mf8_tu(vuint8mf8_t vd, vuint8mf8_t vs2, + vuint8mf8_t vs1, size_t vl) { + return __riscv_vror_tu(vd, vs2, vs1, vl); +} + +vuint8mf8_t test_vror_vx_u8mf8_tu(vuint8mf8_t vd, vuint8mf8_t vs2, size_t rs1, + size_t vl) { + return __riscv_vror_tu(vd, vs2, rs1, vl); +} + +vuint8mf4_t test_vror_vv_u8mf4_tu(vuint8mf4_t vd, vuint8mf4_t vs2, + vuint8mf4_t vs1, size_t vl) { + return __riscv_vror_tu(vd, vs2, vs1, vl); +} + +vuint8mf4_t test_vror_vx_u8mf4_tu(vuint8mf4_t vd, vuint8mf4_t vs2, size_t rs1, + size_t vl) { + return __riscv_vror_tu(vd, vs2, rs1, vl); +} + +vuint8mf2_t test_vror_vv_u8mf2_tu(vuint8mf2_t vd, vuint8mf2_t vs2, + vuint8mf2_t vs1, size_t vl) { + return __riscv_vror_tu(vd, vs2, vs1, vl); +} + +vuint8mf2_t test_vror_vx_u8mf2_tu(vuint8mf2_t vd, vuint8mf2_t vs2, size_t rs1, + size_t vl) { + return __riscv_vror_tu(vd, vs2, rs1, vl); +} + +vuint8m1_t test_vror_vv_u8m1_tu(vuint8m1_t vd, vuint8m1_t vs2, vuint8m1_t vs1, + size_t vl) { + return __riscv_vror_tu(vd, vs2, vs1, vl); +} + +vuint8m1_t test_vror_vx_u8m1_tu(vuint8m1_t vd, vuint8m1_t vs2, size_t rs1, + size_t vl) { + return __riscv_vror_tu(vd, vs2, rs1, vl); +} + +vuint8m2_t test_vror_vv_u8m2_tu(vuint8m2_t vd, vuint8m2_t vs2, vuint8m2_t vs1, + size_t vl) { + return __riscv_vror_tu(vd, vs2, vs1, vl); +} + +vuint8m2_t test_vror_vx_u8m2_tu(vuint8m2_t vd, vuint8m2_t vs2, size_t rs1, + size_t vl) { + return __riscv_vror_tu(vd, vs2, rs1, vl); +} + +vuint8m4_t test_vror_vv_u8m4_tu(vuint8m4_t vd, vuint8m4_t vs2, vuint8m4_t vs1, + size_t vl) { + return __riscv_vror_tu(vd, vs2, vs1, vl); +} + +vuint8m4_t test_vror_vx_u8m4_tu(vuint8m4_t vd, vuint8m4_t vs2, size_t rs1, + size_t vl) { + return __riscv_vror_tu(vd, vs2, rs1, vl); +} + +vuint8m8_t test_vror_vv_u8m8_tu(vuint8m8_t vd, vuint8m8_t vs2, vuint8m8_t vs1, + size_t vl) { + return __riscv_vror_tu(vd, vs2, vs1, vl); +} + +vuint8m8_t test_vror_vx_u8m8_tu(vuint8m8_t vd, vuint8m8_t vs2, size_t rs1, + size_t vl) { + return __riscv_vror_tu(vd, vs2, rs1, vl); +} + +vuint16mf4_t test_vror_vv_u16mf4_tu(vuint16mf4_t vd, vuint16mf4_t vs2, + vuint16mf4_t vs1, size_t vl) { + return __riscv_vror_tu(vd, vs2, vs1, vl); +} + +vuint16mf4_t test_vror_vx_u16mf4_tu(vuint16mf4_t vd, vuint16mf4_t vs2, + size_t rs1, size_t vl) { + return __riscv_vror_tu(vd, vs2, rs1, vl); +} + +vuint16mf2_t test_vror_vv_u16mf2_tu(vuint16mf2_t vd, vuint16mf2_t vs2, + vuint16mf2_t vs1, size_t vl) { + return __riscv_vror_tu(vd, vs2, vs1, vl); +} + +vuint16mf2_t test_vror_vx_u16mf2_tu(vuint16mf2_t vd, vuint16mf2_t vs2, + size_t rs1, size_t vl) { + return __riscv_vror_tu(vd, vs2, rs1, vl); +} + +vuint16m1_t test_vror_vv_u16m1_tu(vuint16m1_t vd, vuint16m1_t vs2, + vuint16m1_t vs1, size_t vl) { + return __riscv_vror_tu(vd, vs2, vs1, vl); +} + +vuint16m1_t test_vror_vx_u16m1_tu(vuint16m1_t vd, vuint16m1_t vs2, size_t rs1, + size_t vl) { + return __riscv_vror_tu(vd, vs2, rs1, vl); +} + +vuint16m2_t test_vror_vv_u16m2_tu(vuint16m2_t vd, vuint16m2_t vs2, + vuint16m2_t vs1, size_t vl) { + return __riscv_vror_tu(vd, vs2, vs1, vl); +} + +vuint16m2_t test_vror_vx_u16m2_tu(vuint16m2_t vd, vuint16m2_t vs2, size_t rs1, + 
size_t vl) { + return __riscv_vror_tu(vd, vs2, rs1, vl); +} + +vuint16m4_t test_vror_vv_u16m4_tu(vuint16m4_t vd, vuint16m4_t vs2, + vuint16m4_t vs1, size_t vl) { + return __riscv_vror_tu(vd, vs2, vs1, vl); +} + +vuint16m4_t test_vror_vx_u16m4_tu(vuint16m4_t vd, vuint16m4_t vs2, size_t rs1, + size_t vl) { + return __riscv_vror_tu(vd, vs2, rs1, vl); +} + +vuint16m8_t test_vror_vv_u16m8_tu(vuint16m8_t vd, vuint16m8_t vs2, + vuint16m8_t vs1, size_t vl) { + return __riscv_vror_tu(vd, vs2, vs1, vl); +} + +vuint16m8_t test_vror_vx_u16m8_tu(vuint16m8_t vd, vuint16m8_t vs2, size_t rs1, + size_t vl) { + return __riscv_vror_tu(vd, vs2, rs1, vl); +} + +vuint32mf2_t test_vror_vv_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, + vuint32mf2_t vs1, size_t vl) { + return __riscv_vror_tu(vd, vs2, vs1, vl); +} + +vuint32mf2_t test_vror_vx_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, + size_t rs1, size_t vl) { + return __riscv_vror_tu(vd, vs2, rs1, vl); +} + +vuint32m1_t test_vror_vv_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, + vuint32m1_t vs1, size_t vl) { + return __riscv_vror_tu(vd, vs2, vs1, vl); +} + +vuint32m1_t test_vror_vx_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, size_t rs1, + size_t vl) { + return __riscv_vror_tu(vd, vs2, rs1, vl); +} + +vuint32m2_t test_vror_vv_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, + vuint32m2_t vs1, size_t vl) { + return __riscv_vror_tu(vd, vs2, vs1, vl); +} + +vuint32m2_t test_vror_vx_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, size_t rs1, + size_t vl) { + return __riscv_vror_tu(vd, vs2, rs1, vl); +} + +vuint32m4_t test_vror_vv_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, + vuint32m4_t vs1, size_t vl) { + return __riscv_vror_tu(vd, vs2, vs1, vl); +} + +vuint32m4_t test_vror_vx_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, size_t rs1, + size_t vl) { + return __riscv_vror_tu(vd, vs2, rs1, vl); +} + +vuint32m8_t test_vror_vv_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, + vuint32m8_t vs1, size_t vl) { + return __riscv_vror_tu(vd, vs2, vs1, vl); +} + +vuint32m8_t test_vror_vx_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, size_t rs1, + size_t vl) { + return __riscv_vror_tu(vd, vs2, rs1, vl); +} + +vuint64m1_t test_vror_vv_u64m1_tu(vuint64m1_t vd, vuint64m1_t vs2, + vuint64m1_t vs1, size_t vl) { + return __riscv_vror_tu(vd, vs2, vs1, vl); +} + +vuint64m1_t test_vror_vx_u64m1_tu(vuint64m1_t vd, vuint64m1_t vs2, size_t rs1, + size_t vl) { + return __riscv_vror_tu(vd, vs2, rs1, vl); +} + +vuint64m2_t test_vror_vv_u64m2_tu(vuint64m2_t vd, vuint64m2_t vs2, + vuint64m2_t vs1, size_t vl) { + return __riscv_vror_tu(vd, vs2, vs1, vl); +} + +vuint64m2_t test_vror_vx_u64m2_tu(vuint64m2_t vd, vuint64m2_t vs2, size_t rs1, + size_t vl) { + return __riscv_vror_tu(vd, vs2, rs1, vl); +} + +vuint64m4_t test_vror_vv_u64m4_tu(vuint64m4_t vd, vuint64m4_t vs2, + vuint64m4_t vs1, size_t vl) { + return __riscv_vror_tu(vd, vs2, vs1, vl); +} + +vuint64m4_t test_vror_vx_u64m4_tu(vuint64m4_t vd, vuint64m4_t vs2, size_t rs1, + size_t vl) { + return __riscv_vror_tu(vd, vs2, rs1, vl); +} + +vuint64m8_t test_vror_vv_u64m8_tu(vuint64m8_t vd, vuint64m8_t vs2, + vuint64m8_t vs1, size_t vl) { + return __riscv_vror_tu(vd, vs2, vs1, vl); +} + +vuint64m8_t test_vror_vx_u64m8_tu(vuint64m8_t vd, vuint64m8_t vs2, size_t rs1, + size_t vl) { + return __riscv_vror_tu(vd, vs2, rs1, vl); +} + +vuint8mf8_t test_vror_vv_u8mf8_tum(vbool64_t vm, vuint8mf8_t vd, + vuint8mf8_t vs2, vuint8mf8_t vs1, + size_t vl) { + return __riscv_vror_tum(vm, vd, vs2, vs1, vl); +} + +vuint8mf8_t test_vror_vx_u8mf8_tum(vbool64_t vm, vuint8mf8_t vd, + vuint8mf8_t vs2, 
size_t rs1, size_t vl) { + return __riscv_vror_tum(vm, vd, vs2, rs1, vl); +} + +vuint8mf4_t test_vror_vv_u8mf4_tum(vbool32_t vm, vuint8mf4_t vd, + vuint8mf4_t vs2, vuint8mf4_t vs1, + size_t vl) { + return __riscv_vror_tum(vm, vd, vs2, vs1, vl); +} + +vuint8mf4_t test_vror_vx_u8mf4_tum(vbool32_t vm, vuint8mf4_t vd, + vuint8mf4_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_tum(vm, vd, vs2, rs1, vl); +} + +vuint8mf2_t test_vror_vv_u8mf2_tum(vbool16_t vm, vuint8mf2_t vd, + vuint8mf2_t vs2, vuint8mf2_t vs1, + size_t vl) { + return __riscv_vror_tum(vm, vd, vs2, vs1, vl); +} + +vuint8mf2_t test_vror_vx_u8mf2_tum(vbool16_t vm, vuint8mf2_t vd, + vuint8mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_tum(vm, vd, vs2, rs1, vl); +} + +vuint8m1_t test_vror_vv_u8m1_tum(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, + vuint8m1_t vs1, size_t vl) { + return __riscv_vror_tum(vm, vd, vs2, vs1, vl); +} + +vuint8m1_t test_vror_vx_u8m1_tum(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, + size_t rs1, size_t vl) { + return __riscv_vror_tum(vm, vd, vs2, rs1, vl); +} + +vuint8m2_t test_vror_vv_u8m2_tum(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, + vuint8m2_t vs1, size_t vl) { + return __riscv_vror_tum(vm, vd, vs2, vs1, vl); +} + +vuint8m2_t test_vror_vx_u8m2_tum(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, + size_t rs1, size_t vl) { + return __riscv_vror_tum(vm, vd, vs2, rs1, vl); +} + +vuint8m4_t test_vror_vv_u8m4_tum(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, + vuint8m4_t vs1, size_t vl) { + return __riscv_vror_tum(vm, vd, vs2, vs1, vl); +} + +vuint8m4_t test_vror_vx_u8m4_tum(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, + size_t rs1, size_t vl) { + return __riscv_vror_tum(vm, vd, vs2, rs1, vl); +} + +vuint8m8_t test_vror_vv_u8m8_tum(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, + vuint8m8_t vs1, size_t vl) { + return __riscv_vror_tum(vm, vd, vs2, vs1, vl); +} + +vuint8m8_t test_vror_vx_u8m8_tum(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, + size_t rs1, size_t vl) { + return __riscv_vror_tum(vm, vd, vs2, rs1, vl); +} + +vuint16mf4_t test_vror_vv_u16mf4_tum(vbool64_t vm, vuint16mf4_t vd, + vuint16mf4_t vs2, vuint16mf4_t vs1, + size_t vl) { + return __riscv_vror_tum(vm, vd, vs2, vs1, vl); +} + +vuint16mf4_t test_vror_vx_u16mf4_tum(vbool64_t vm, vuint16mf4_t vd, + vuint16mf4_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_tum(vm, vd, vs2, rs1, vl); +} + +vuint16mf2_t test_vror_vv_u16mf2_tum(vbool32_t vm, vuint16mf2_t vd, + vuint16mf2_t vs2, vuint16mf2_t vs1, + size_t vl) { + return __riscv_vror_tum(vm, vd, vs2, vs1, vl); +} + +vuint16mf2_t test_vror_vx_u16mf2_tum(vbool32_t vm, vuint16mf2_t vd, + vuint16mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_tum(vm, vd, vs2, rs1, vl); +} + +vuint16m1_t test_vror_vv_u16m1_tum(vbool16_t vm, vuint16m1_t vd, + vuint16m1_t vs2, vuint16m1_t vs1, + size_t vl) { + return __riscv_vror_tum(vm, vd, vs2, vs1, vl); +} + +vuint16m1_t test_vror_vx_u16m1_tum(vbool16_t vm, vuint16m1_t vd, + vuint16m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_tum(vm, vd, vs2, rs1, vl); +} + +vuint16m2_t test_vror_vv_u16m2_tum(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, + vuint16m2_t vs1, size_t vl) { + return __riscv_vror_tum(vm, vd, vs2, vs1, vl); +} + +vuint16m2_t test_vror_vx_u16m2_tum(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, + size_t rs1, size_t vl) { + return __riscv_vror_tum(vm, vd, vs2, rs1, vl); +} + +vuint16m4_t test_vror_vv_u16m4_tum(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, + vuint16m4_t vs1, size_t vl) { + return __riscv_vror_tum(vm, vd, vs2, vs1, vl); 
+} + +vuint16m4_t test_vror_vx_u16m4_tum(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, + size_t rs1, size_t vl) { + return __riscv_vror_tum(vm, vd, vs2, rs1, vl); +} + +vuint16m8_t test_vror_vv_u16m8_tum(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, + vuint16m8_t vs1, size_t vl) { + return __riscv_vror_tum(vm, vd, vs2, vs1, vl); +} + +vuint16m8_t test_vror_vx_u16m8_tum(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, + size_t rs1, size_t vl) { + return __riscv_vror_tum(vm, vd, vs2, rs1, vl); +} + +vuint32mf2_t test_vror_vv_u32mf2_tum(vbool64_t vm, vuint32mf2_t vd, + vuint32mf2_t vs2, vuint32mf2_t vs1, + size_t vl) { + return __riscv_vror_tum(vm, vd, vs2, vs1, vl); +} + +vuint32mf2_t test_vror_vx_u32mf2_tum(vbool64_t vm, vuint32mf2_t vd, + vuint32mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_tum(vm, vd, vs2, rs1, vl); +} + +vuint32m1_t test_vror_vv_u32m1_tum(vbool32_t vm, vuint32m1_t vd, + vuint32m1_t vs2, vuint32m1_t vs1, + size_t vl) { + return __riscv_vror_tum(vm, vd, vs2, vs1, vl); +} + +vuint32m1_t test_vror_vx_u32m1_tum(vbool32_t vm, vuint32m1_t vd, + vuint32m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_tum(vm, vd, vs2, rs1, vl); +} + +vuint32m2_t test_vror_vv_u32m2_tum(vbool16_t vm, vuint32m2_t vd, + vuint32m2_t vs2, vuint32m2_t vs1, + size_t vl) { + return __riscv_vror_tum(vm, vd, vs2, vs1, vl); +} + +vuint32m2_t test_vror_vx_u32m2_tum(vbool16_t vm, vuint32m2_t vd, + vuint32m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_tum(vm, vd, vs2, rs1, vl); +} + +vuint32m4_t test_vror_vv_u32m4_tum(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, + vuint32m4_t vs1, size_t vl) { + return __riscv_vror_tum(vm, vd, vs2, vs1, vl); +} + +vuint32m4_t test_vror_vx_u32m4_tum(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, + size_t rs1, size_t vl) { + return __riscv_vror_tum(vm, vd, vs2, rs1, vl); +} + +vuint32m8_t test_vror_vv_u32m8_tum(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, + vuint32m8_t vs1, size_t vl) { + return __riscv_vror_tum(vm, vd, vs2, vs1, vl); +} + +vuint32m8_t test_vror_vx_u32m8_tum(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, + size_t rs1, size_t vl) { + return __riscv_vror_tum(vm, vd, vs2, rs1, vl); +} + +vuint64m1_t test_vror_vv_u64m1_tum(vbool64_t vm, vuint64m1_t vd, + vuint64m1_t vs2, vuint64m1_t vs1, + size_t vl) { + return __riscv_vror_tum(vm, vd, vs2, vs1, vl); +} + +vuint64m1_t test_vror_vx_u64m1_tum(vbool64_t vm, vuint64m1_t vd, + vuint64m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_tum(vm, vd, vs2, rs1, vl); +} + +vuint64m2_t test_vror_vv_u64m2_tum(vbool32_t vm, vuint64m2_t vd, + vuint64m2_t vs2, vuint64m2_t vs1, + size_t vl) { + return __riscv_vror_tum(vm, vd, vs2, vs1, vl); +} + +vuint64m2_t test_vror_vx_u64m2_tum(vbool32_t vm, vuint64m2_t vd, + vuint64m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_tum(vm, vd, vs2, rs1, vl); +} + +vuint64m4_t test_vror_vv_u64m4_tum(vbool16_t vm, vuint64m4_t vd, + vuint64m4_t vs2, vuint64m4_t vs1, + size_t vl) { + return __riscv_vror_tum(vm, vd, vs2, vs1, vl); +} + +vuint64m4_t test_vror_vx_u64m4_tum(vbool16_t vm, vuint64m4_t vd, + vuint64m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_tum(vm, vd, vs2, rs1, vl); +} + +vuint64m8_t test_vror_vv_u64m8_tum(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, + vuint64m8_t vs1, size_t vl) { + return __riscv_vror_tum(vm, vd, vs2, vs1, vl); +} + +vuint64m8_t test_vror_vx_u64m8_tum(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, + size_t rs1, size_t vl) { + return __riscv_vror_tum(vm, vd, vs2, rs1, vl); +} + +vuint8mf8_t 
test_vror_vv_u8mf8_tumu(vbool64_t vm, vuint8mf8_t vd, + vuint8mf8_t vs2, vuint8mf8_t vs1, + size_t vl) { + return __riscv_vror_tumu(vm, vd, vs2, vs1, vl); +} + +vuint8mf8_t test_vror_vx_u8mf8_tumu(vbool64_t vm, vuint8mf8_t vd, + vuint8mf8_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_tumu(vm, vd, vs2, rs1, vl); +} + +vuint8mf4_t test_vror_vv_u8mf4_tumu(vbool32_t vm, vuint8mf4_t vd, + vuint8mf4_t vs2, vuint8mf4_t vs1, + size_t vl) { + return __riscv_vror_tumu(vm, vd, vs2, vs1, vl); +} + +vuint8mf4_t test_vror_vx_u8mf4_tumu(vbool32_t vm, vuint8mf4_t vd, + vuint8mf4_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_tumu(vm, vd, vs2, rs1, vl); +} + +vuint8mf2_t test_vror_vv_u8mf2_tumu(vbool16_t vm, vuint8mf2_t vd, + vuint8mf2_t vs2, vuint8mf2_t vs1, + size_t vl) { + return __riscv_vror_tumu(vm, vd, vs2, vs1, vl); +} + +vuint8mf2_t test_vror_vx_u8mf2_tumu(vbool16_t vm, vuint8mf2_t vd, + vuint8mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_tumu(vm, vd, vs2, rs1, vl); +} + +vuint8m1_t test_vror_vv_u8m1_tumu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, + vuint8m1_t vs1, size_t vl) { + return __riscv_vror_tumu(vm, vd, vs2, vs1, vl); +} + +vuint8m1_t test_vror_vx_u8m1_tumu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, + size_t rs1, size_t vl) { + return __riscv_vror_tumu(vm, vd, vs2, rs1, vl); +} + +vuint8m2_t test_vror_vv_u8m2_tumu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, + vuint8m2_t vs1, size_t vl) { + return __riscv_vror_tumu(vm, vd, vs2, vs1, vl); +} + +vuint8m2_t test_vror_vx_u8m2_tumu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, + size_t rs1, size_t vl) { + return __riscv_vror_tumu(vm, vd, vs2, rs1, vl); +} + +vuint8m4_t test_vror_vv_u8m4_tumu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, + vuint8m4_t vs1, size_t vl) { + return __riscv_vror_tumu(vm, vd, vs2, vs1, vl); +} + +vuint8m4_t test_vror_vx_u8m4_tumu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, + size_t rs1, size_t vl) { + return __riscv_vror_tumu(vm, vd, vs2, rs1, vl); +} + +vuint8m8_t test_vror_vv_u8m8_tumu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, + vuint8m8_t vs1, size_t vl) { + return __riscv_vror_tumu(vm, vd, vs2, vs1, vl); +} + +vuint8m8_t test_vror_vx_u8m8_tumu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, + size_t rs1, size_t vl) { + return __riscv_vror_tumu(vm, vd, vs2, rs1, vl); +} + +vuint16mf4_t test_vror_vv_u16mf4_tumu(vbool64_t vm, vuint16mf4_t vd, + vuint16mf4_t vs2, vuint16mf4_t vs1, + size_t vl) { + return __riscv_vror_tumu(vm, vd, vs2, vs1, vl); +} + +vuint16mf4_t test_vror_vx_u16mf4_tumu(vbool64_t vm, vuint16mf4_t vd, + vuint16mf4_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_tumu(vm, vd, vs2, rs1, vl); +} + +vuint16mf2_t test_vror_vv_u16mf2_tumu(vbool32_t vm, vuint16mf2_t vd, + vuint16mf2_t vs2, vuint16mf2_t vs1, + size_t vl) { + return __riscv_vror_tumu(vm, vd, vs2, vs1, vl); +} + +vuint16mf2_t test_vror_vx_u16mf2_tumu(vbool32_t vm, vuint16mf2_t vd, + vuint16mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_tumu(vm, vd, vs2, rs1, vl); +} + +vuint16m1_t test_vror_vv_u16m1_tumu(vbool16_t vm, vuint16m1_t vd, + vuint16m1_t vs2, vuint16m1_t vs1, + size_t vl) { + return __riscv_vror_tumu(vm, vd, vs2, vs1, vl); +} + +vuint16m1_t test_vror_vx_u16m1_tumu(vbool16_t vm, vuint16m1_t vd, + vuint16m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_tumu(vm, vd, vs2, rs1, vl); +} + +vuint16m2_t test_vror_vv_u16m2_tumu(vbool8_t vm, vuint16m2_t vd, + vuint16m2_t vs2, vuint16m2_t vs1, + size_t vl) { + return __riscv_vror_tumu(vm, vd, vs2, vs1, vl); +} + +vuint16m2_t 
test_vror_vx_u16m2_tumu(vbool8_t vm, vuint16m2_t vd, + vuint16m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_tumu(vm, vd, vs2, rs1, vl); +} + +vuint16m4_t test_vror_vv_u16m4_tumu(vbool4_t vm, vuint16m4_t vd, + vuint16m4_t vs2, vuint16m4_t vs1, + size_t vl) { + return __riscv_vror_tumu(vm, vd, vs2, vs1, vl); +} + +vuint16m4_t test_vror_vx_u16m4_tumu(vbool4_t vm, vuint16m4_t vd, + vuint16m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_tumu(vm, vd, vs2, rs1, vl); +} + +vuint16m8_t test_vror_vv_u16m8_tumu(vbool2_t vm, vuint16m8_t vd, + vuint16m8_t vs2, vuint16m8_t vs1, + size_t vl) { + return __riscv_vror_tumu(vm, vd, vs2, vs1, vl); +} + +vuint16m8_t test_vror_vx_u16m8_tumu(vbool2_t vm, vuint16m8_t vd, + vuint16m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_tumu(vm, vd, vs2, rs1, vl); +} + +vuint32mf2_t test_vror_vv_u32mf2_tumu(vbool64_t vm, vuint32mf2_t vd, + vuint32mf2_t vs2, vuint32mf2_t vs1, + size_t vl) { + return __riscv_vror_tumu(vm, vd, vs2, vs1, vl); +} + +vuint32mf2_t test_vror_vx_u32mf2_tumu(vbool64_t vm, vuint32mf2_t vd, + vuint32mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_tumu(vm, vd, vs2, rs1, vl); +} + +vuint32m1_t test_vror_vv_u32m1_tumu(vbool32_t vm, vuint32m1_t vd, + vuint32m1_t vs2, vuint32m1_t vs1, + size_t vl) { + return __riscv_vror_tumu(vm, vd, vs2, vs1, vl); +} + +vuint32m1_t test_vror_vx_u32m1_tumu(vbool32_t vm, vuint32m1_t vd, + vuint32m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_tumu(vm, vd, vs2, rs1, vl); +} + +vuint32m2_t test_vror_vv_u32m2_tumu(vbool16_t vm, vuint32m2_t vd, + vuint32m2_t vs2, vuint32m2_t vs1, + size_t vl) { + return __riscv_vror_tumu(vm, vd, vs2, vs1, vl); +} + +vuint32m2_t test_vror_vx_u32m2_tumu(vbool16_t vm, vuint32m2_t vd, + vuint32m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_tumu(vm, vd, vs2, rs1, vl); +} + +vuint32m4_t test_vror_vv_u32m4_tumu(vbool8_t vm, vuint32m4_t vd, + vuint32m4_t vs2, vuint32m4_t vs1, + size_t vl) { + return __riscv_vror_tumu(vm, vd, vs2, vs1, vl); +} + +vuint32m4_t test_vror_vx_u32m4_tumu(vbool8_t vm, vuint32m4_t vd, + vuint32m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_tumu(vm, vd, vs2, rs1, vl); +} + +vuint32m8_t test_vror_vv_u32m8_tumu(vbool4_t vm, vuint32m8_t vd, + vuint32m8_t vs2, vuint32m8_t vs1, + size_t vl) { + return __riscv_vror_tumu(vm, vd, vs2, vs1, vl); +} + +vuint32m8_t test_vror_vx_u32m8_tumu(vbool4_t vm, vuint32m8_t vd, + vuint32m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_tumu(vm, vd, vs2, rs1, vl); +} + +vuint64m1_t test_vror_vv_u64m1_tumu(vbool64_t vm, vuint64m1_t vd, + vuint64m1_t vs2, vuint64m1_t vs1, + size_t vl) { + return __riscv_vror_tumu(vm, vd, vs2, vs1, vl); +} + +vuint64m1_t test_vror_vx_u64m1_tumu(vbool64_t vm, vuint64m1_t vd, + vuint64m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_tumu(vm, vd, vs2, rs1, vl); +} + +vuint64m2_t test_vror_vv_u64m2_tumu(vbool32_t vm, vuint64m2_t vd, + vuint64m2_t vs2, vuint64m2_t vs1, + size_t vl) { + return __riscv_vror_tumu(vm, vd, vs2, vs1, vl); +} + +vuint64m2_t test_vror_vx_u64m2_tumu(vbool32_t vm, vuint64m2_t vd, + vuint64m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_tumu(vm, vd, vs2, rs1, vl); +} + +vuint64m4_t test_vror_vv_u64m4_tumu(vbool16_t vm, vuint64m4_t vd, + vuint64m4_t vs2, vuint64m4_t vs1, + size_t vl) { + return __riscv_vror_tumu(vm, vd, vs2, vs1, vl); +} + +vuint64m4_t test_vror_vx_u64m4_tumu(vbool16_t vm, vuint64m4_t vd, + vuint64m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_tumu(vm, vd, vs2, rs1, vl); +} + 
+vuint64m8_t test_vror_vv_u64m8_tumu(vbool8_t vm, vuint64m8_t vd, + vuint64m8_t vs2, vuint64m8_t vs1, + size_t vl) { + return __riscv_vror_tumu(vm, vd, vs2, vs1, vl); +} + +vuint64m8_t test_vror_vx_u64m8_tumu(vbool8_t vm, vuint64m8_t vd, + vuint64m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_tumu(vm, vd, vs2, rs1, vl); +} + +vuint8mf8_t test_vror_vv_u8mf8_mu(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, + vuint8mf8_t vs1, size_t vl) { + return __riscv_vror_mu(vm, vd, vs2, vs1, vl); +} + +vuint8mf8_t test_vror_vx_u8mf8_mu(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, + size_t rs1, size_t vl) { + return __riscv_vror_mu(vm, vd, vs2, rs1, vl); +} + +vuint8mf4_t test_vror_vv_u8mf4_mu(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, + vuint8mf4_t vs1, size_t vl) { + return __riscv_vror_mu(vm, vd, vs2, vs1, vl); +} + +vuint8mf4_t test_vror_vx_u8mf4_mu(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, + size_t rs1, size_t vl) { + return __riscv_vror_mu(vm, vd, vs2, rs1, vl); +} + +vuint8mf2_t test_vror_vv_u8mf2_mu(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, + vuint8mf2_t vs1, size_t vl) { + return __riscv_vror_mu(vm, vd, vs2, vs1, vl); +} + +vuint8mf2_t test_vror_vx_u8mf2_mu(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, + size_t rs1, size_t vl) { + return __riscv_vror_mu(vm, vd, vs2, rs1, vl); +} + +vuint8m1_t test_vror_vv_u8m1_mu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, + vuint8m1_t vs1, size_t vl) { + return __riscv_vror_mu(vm, vd, vs2, vs1, vl); +} + +vuint8m1_t test_vror_vx_u8m1_mu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, + size_t rs1, size_t vl) { + return __riscv_vror_mu(vm, vd, vs2, rs1, vl); +} + +vuint8m2_t test_vror_vv_u8m2_mu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, + vuint8m2_t vs1, size_t vl) { + return __riscv_vror_mu(vm, vd, vs2, vs1, vl); +} + +vuint8m2_t test_vror_vx_u8m2_mu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, + size_t rs1, size_t vl) { + return __riscv_vror_mu(vm, vd, vs2, rs1, vl); +} + +vuint8m4_t test_vror_vv_u8m4_mu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, + vuint8m4_t vs1, size_t vl) { + return __riscv_vror_mu(vm, vd, vs2, vs1, vl); +} + +vuint8m4_t test_vror_vx_u8m4_mu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, + size_t rs1, size_t vl) { + return __riscv_vror_mu(vm, vd, vs2, rs1, vl); +} + +vuint8m8_t test_vror_vv_u8m8_mu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, + vuint8m8_t vs1, size_t vl) { + return __riscv_vror_mu(vm, vd, vs2, vs1, vl); +} + +vuint8m8_t test_vror_vx_u8m8_mu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, + size_t rs1, size_t vl) { + return __riscv_vror_mu(vm, vd, vs2, rs1, vl); +} + +vuint16mf4_t test_vror_vv_u16mf4_mu(vbool64_t vm, vuint16mf4_t vd, + vuint16mf4_t vs2, vuint16mf4_t vs1, + size_t vl) { + return __riscv_vror_mu(vm, vd, vs2, vs1, vl); +} + +vuint16mf4_t test_vror_vx_u16mf4_mu(vbool64_t vm, vuint16mf4_t vd, + vuint16mf4_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_mu(vm, vd, vs2, rs1, vl); +} + +vuint16mf2_t test_vror_vv_u16mf2_mu(vbool32_t vm, vuint16mf2_t vd, + vuint16mf2_t vs2, vuint16mf2_t vs1, + size_t vl) { + return __riscv_vror_mu(vm, vd, vs2, vs1, vl); +} + +vuint16mf2_t test_vror_vx_u16mf2_mu(vbool32_t vm, vuint16mf2_t vd, + vuint16mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_mu(vm, vd, vs2, rs1, vl); +} + +vuint16m1_t test_vror_vv_u16m1_mu(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, + vuint16m1_t vs1, size_t vl) { + return __riscv_vror_mu(vm, vd, vs2, vs1, vl); +} + +vuint16m1_t test_vror_vx_u16m1_mu(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, + size_t rs1, size_t vl) { + 
return __riscv_vror_mu(vm, vd, vs2, rs1, vl); +} + +vuint16m2_t test_vror_vv_u16m2_mu(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, + vuint16m2_t vs1, size_t vl) { + return __riscv_vror_mu(vm, vd, vs2, vs1, vl); +} + +vuint16m2_t test_vror_vx_u16m2_mu(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, + size_t rs1, size_t vl) { + return __riscv_vror_mu(vm, vd, vs2, rs1, vl); +} + +vuint16m4_t test_vror_vv_u16m4_mu(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, + vuint16m4_t vs1, size_t vl) { + return __riscv_vror_mu(vm, vd, vs2, vs1, vl); +} + +vuint16m4_t test_vror_vx_u16m4_mu(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, + size_t rs1, size_t vl) { + return __riscv_vror_mu(vm, vd, vs2, rs1, vl); +} + +vuint16m8_t test_vror_vv_u16m8_mu(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, + vuint16m8_t vs1, size_t vl) { + return __riscv_vror_mu(vm, vd, vs2, vs1, vl); +} + +vuint16m8_t test_vror_vx_u16m8_mu(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, + size_t rs1, size_t vl) { + return __riscv_vror_mu(vm, vd, vs2, rs1, vl); +} + +vuint32mf2_t test_vror_vv_u32mf2_mu(vbool64_t vm, vuint32mf2_t vd, + vuint32mf2_t vs2, vuint32mf2_t vs1, + size_t vl) { + return __riscv_vror_mu(vm, vd, vs2, vs1, vl); +} + +vuint32mf2_t test_vror_vx_u32mf2_mu(vbool64_t vm, vuint32mf2_t vd, + vuint32mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_mu(vm, vd, vs2, rs1, vl); +} + +vuint32m1_t test_vror_vv_u32m1_mu(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, + vuint32m1_t vs1, size_t vl) { + return __riscv_vror_mu(vm, vd, vs2, vs1, vl); +} + +vuint32m1_t test_vror_vx_u32m1_mu(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, + size_t rs1, size_t vl) { + return __riscv_vror_mu(vm, vd, vs2, rs1, vl); +} + +vuint32m2_t test_vror_vv_u32m2_mu(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, + vuint32m2_t vs1, size_t vl) { + return __riscv_vror_mu(vm, vd, vs2, vs1, vl); +} + +vuint32m2_t test_vror_vx_u32m2_mu(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, + size_t rs1, size_t vl) { + return __riscv_vror_mu(vm, vd, vs2, rs1, vl); +} + +vuint32m4_t test_vror_vv_u32m4_mu(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, + vuint32m4_t vs1, size_t vl) { + return __riscv_vror_mu(vm, vd, vs2, vs1, vl); +} + +vuint32m4_t test_vror_vx_u32m4_mu(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, + size_t rs1, size_t vl) { + return __riscv_vror_mu(vm, vd, vs2, rs1, vl); +} + +vuint32m8_t test_vror_vv_u32m8_mu(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, + vuint32m8_t vs1, size_t vl) { + return __riscv_vror_mu(vm, vd, vs2, vs1, vl); +} + +vuint32m8_t test_vror_vx_u32m8_mu(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, + size_t rs1, size_t vl) { + return __riscv_vror_mu(vm, vd, vs2, rs1, vl); +} + +vuint64m1_t test_vror_vv_u64m1_mu(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, + vuint64m1_t vs1, size_t vl) { + return __riscv_vror_mu(vm, vd, vs2, vs1, vl); +} + +vuint64m1_t test_vror_vx_u64m1_mu(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, + size_t rs1, size_t vl) { + return __riscv_vror_mu(vm, vd, vs2, rs1, vl); +} + +vuint64m2_t test_vror_vv_u64m2_mu(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, + vuint64m2_t vs1, size_t vl) { + return __riscv_vror_mu(vm, vd, vs2, vs1, vl); +} + +vuint64m2_t test_vror_vx_u64m2_mu(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, + size_t rs1, size_t vl) { + return __riscv_vror_mu(vm, vd, vs2, rs1, vl); +} + +vuint64m4_t test_vror_vv_u64m4_mu(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, + vuint64m4_t vs1, size_t vl) { + return __riscv_vror_mu(vm, vd, vs2, vs1, vl); +} + +vuint64m4_t test_vror_vx_u64m4_mu(vbool16_t 
vm, vuint64m4_t vd, vuint64m4_t vs2, + size_t rs1, size_t vl) { + return __riscv_vror_mu(vm, vd, vs2, rs1, vl); +} + +vuint64m8_t test_vror_vv_u64m8_mu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, + vuint64m8_t vs1, size_t vl) { + return __riscv_vror_mu(vm, vd, vs2, vs1, vl); +} + +vuint64m8_t test_vror_vx_u64m8_mu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, + size_t rs1, size_t vl) { + return __riscv_vror_mu(vm, vd, vs2, rs1, vl); +} diff --git a/auto-generated/vector-crypto/policy_funcs/llvm-overloaded-tests/vsha2ch.c b/auto-generated/vector-crypto/policy_funcs/llvm-overloaded-tests/vsha2ch.c new file mode 100644 index 000000000..e61e23e6d --- /dev/null +++ b/auto-generated/vector-crypto/policy_funcs/llvm-overloaded-tests/vsha2ch.c @@ -0,0 +1,58 @@ +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zvl512b \ +// RUN: -target-feature +zvbb \ +// RUN: -target-feature +zvbc \ +// RUN: -target-feature +zvkg \ +// RUN: -target-feature +zvkned \ +// RUN: -target-feature +zvknhb \ +// RUN: -target-feature +zvksed \ +// RUN: -target-feature +zvksh -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include + +vuint32mf2_t test_vsha2ch_vv_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, + vuint32mf2_t vs1, size_t vl) { + return __riscv_vsha2ch_tu(vd, vs2, vs1, vl); +} + +vuint32m1_t test_vsha2ch_vv_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, + vuint32m1_t vs1, size_t vl) { + return __riscv_vsha2ch_tu(vd, vs2, vs1, vl); +} + +vuint32m2_t test_vsha2ch_vv_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, + vuint32m2_t vs1, size_t vl) { + return __riscv_vsha2ch_tu(vd, vs2, vs1, vl); +} + +vuint32m4_t test_vsha2ch_vv_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, + vuint32m4_t vs1, size_t vl) { + return __riscv_vsha2ch_tu(vd, vs2, vs1, vl); +} + +vuint32m8_t test_vsha2ch_vv_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, + vuint32m8_t vs1, size_t vl) { + return __riscv_vsha2ch_tu(vd, vs2, vs1, vl); +} + +vuint64m1_t test_vsha2ch_vv_u64m1_tu(vuint64m1_t vd, vuint64m1_t vs2, + vuint64m1_t vs1, size_t vl) { + return __riscv_vsha2ch_tu(vd, vs2, vs1, vl); +} + +vuint64m2_t test_vsha2ch_vv_u64m2_tu(vuint64m2_t vd, vuint64m2_t vs2, + vuint64m2_t vs1, size_t vl) { + return __riscv_vsha2ch_tu(vd, vs2, vs1, vl); +} + +vuint64m4_t test_vsha2ch_vv_u64m4_tu(vuint64m4_t vd, vuint64m4_t vs2, + vuint64m4_t vs1, size_t vl) { + return __riscv_vsha2ch_tu(vd, vs2, vs1, vl); +} + +vuint64m8_t test_vsha2ch_vv_u64m8_tu(vuint64m8_t vd, vuint64m8_t vs2, + vuint64m8_t vs1, size_t vl) { + return __riscv_vsha2ch_tu(vd, vs2, vs1, vl); +} diff --git a/auto-generated/vector-crypto/policy_funcs/llvm-overloaded-tests/vsha2cl.c b/auto-generated/vector-crypto/policy_funcs/llvm-overloaded-tests/vsha2cl.c new file mode 100644 index 000000000..5ca7969f5 --- /dev/null +++ b/auto-generated/vector-crypto/policy_funcs/llvm-overloaded-tests/vsha2cl.c @@ -0,0 +1,58 @@ +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zvl512b \ +// RUN: -target-feature +zvbb \ +// RUN: -target-feature +zvbc \ +// RUN: -target-feature +zvkg \ +// RUN: -target-feature +zvkned \ +// RUN: -target-feature +zvknhb \ +// RUN: -target-feature +zvksed \ +// RUN: -target-feature +zvksh -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include + +vuint32mf2_t test_vsha2cl_vv_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, + 
vuint32mf2_t vs1, size_t vl) { + return __riscv_vsha2cl_tu(vd, vs2, vs1, vl); +} + +vuint32m1_t test_vsha2cl_vv_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, + vuint32m1_t vs1, size_t vl) { + return __riscv_vsha2cl_tu(vd, vs2, vs1, vl); +} + +vuint32m2_t test_vsha2cl_vv_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, + vuint32m2_t vs1, size_t vl) { + return __riscv_vsha2cl_tu(vd, vs2, vs1, vl); +} + +vuint32m4_t test_vsha2cl_vv_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, + vuint32m4_t vs1, size_t vl) { + return __riscv_vsha2cl_tu(vd, vs2, vs1, vl); +} + +vuint32m8_t test_vsha2cl_vv_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, + vuint32m8_t vs1, size_t vl) { + return __riscv_vsha2cl_tu(vd, vs2, vs1, vl); +} + +vuint64m1_t test_vsha2cl_vv_u64m1_tu(vuint64m1_t vd, vuint64m1_t vs2, + vuint64m1_t vs1, size_t vl) { + return __riscv_vsha2cl_tu(vd, vs2, vs1, vl); +} + +vuint64m2_t test_vsha2cl_vv_u64m2_tu(vuint64m2_t vd, vuint64m2_t vs2, + vuint64m2_t vs1, size_t vl) { + return __riscv_vsha2cl_tu(vd, vs2, vs1, vl); +} + +vuint64m4_t test_vsha2cl_vv_u64m4_tu(vuint64m4_t vd, vuint64m4_t vs2, + vuint64m4_t vs1, size_t vl) { + return __riscv_vsha2cl_tu(vd, vs2, vs1, vl); +} + +vuint64m8_t test_vsha2cl_vv_u64m8_tu(vuint64m8_t vd, vuint64m8_t vs2, + vuint64m8_t vs1, size_t vl) { + return __riscv_vsha2cl_tu(vd, vs2, vs1, vl); +} diff --git a/auto-generated/vector-crypto/policy_funcs/llvm-overloaded-tests/vsha2ms.c b/auto-generated/vector-crypto/policy_funcs/llvm-overloaded-tests/vsha2ms.c new file mode 100644 index 000000000..ef3478429 --- /dev/null +++ b/auto-generated/vector-crypto/policy_funcs/llvm-overloaded-tests/vsha2ms.c @@ -0,0 +1,58 @@ +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zvl512b \ +// RUN: -target-feature +zvbb \ +// RUN: -target-feature +zvbc \ +// RUN: -target-feature +zvkg \ +// RUN: -target-feature +zvkned \ +// RUN: -target-feature +zvknhb \ +// RUN: -target-feature +zvksed \ +// RUN: -target-feature +zvksh -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include + +vuint32mf2_t test_vsha2ms_vv_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, + vuint32mf2_t vs1, size_t vl) { + return __riscv_vsha2ms_tu(vd, vs2, vs1, vl); +} + +vuint32m1_t test_vsha2ms_vv_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, + vuint32m1_t vs1, size_t vl) { + return __riscv_vsha2ms_tu(vd, vs2, vs1, vl); +} + +vuint32m2_t test_vsha2ms_vv_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, + vuint32m2_t vs1, size_t vl) { + return __riscv_vsha2ms_tu(vd, vs2, vs1, vl); +} + +vuint32m4_t test_vsha2ms_vv_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, + vuint32m4_t vs1, size_t vl) { + return __riscv_vsha2ms_tu(vd, vs2, vs1, vl); +} + +vuint32m8_t test_vsha2ms_vv_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, + vuint32m8_t vs1, size_t vl) { + return __riscv_vsha2ms_tu(vd, vs2, vs1, vl); +} + +vuint64m1_t test_vsha2ms_vv_u64m1_tu(vuint64m1_t vd, vuint64m1_t vs2, + vuint64m1_t vs1, size_t vl) { + return __riscv_vsha2ms_tu(vd, vs2, vs1, vl); +} + +vuint64m2_t test_vsha2ms_vv_u64m2_tu(vuint64m2_t vd, vuint64m2_t vs2, + vuint64m2_t vs1, size_t vl) { + return __riscv_vsha2ms_tu(vd, vs2, vs1, vl); +} + +vuint64m4_t test_vsha2ms_vv_u64m4_tu(vuint64m4_t vd, vuint64m4_t vs2, + vuint64m4_t vs1, size_t vl) { + return __riscv_vsha2ms_tu(vd, vs2, vs1, vl); +} + +vuint64m8_t test_vsha2ms_vv_u64m8_tu(vuint64m8_t vd, vuint64m8_t vs2, + vuint64m8_t vs1, size_t vl) { + return __riscv_vsha2ms_tu(vd, vs2, vs1, vl); +} diff --git 
a/auto-generated/vector-crypto/policy_funcs/llvm-overloaded-tests/vsm3c.c b/auto-generated/vector-crypto/policy_funcs/llvm-overloaded-tests/vsm3c.c new file mode 100644 index 000000000..3bc96a360 --- /dev/null +++ b/auto-generated/vector-crypto/policy_funcs/llvm-overloaded-tests/vsm3c.c @@ -0,0 +1,34 @@ +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zvl512b \ +// RUN: -target-feature +zvbb \ +// RUN: -target-feature +zvbc \ +// RUN: -target-feature +zvkg \ +// RUN: -target-feature +zvkned \ +// RUN: -target-feature +zvknhb \ +// RUN: -target-feature +zvksed \ +// RUN: -target-feature +zvksh -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include + +vuint32mf2_t test_vsm3c_vi_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, + size_t vl) { + return __riscv_vsm3c_tu(vd, vs2, 0, vl); +} + +vuint32m1_t test_vsm3c_vi_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vsm3c_tu(vd, vs2, 0, vl); +} + +vuint32m2_t test_vsm3c_vi_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vsm3c_tu(vd, vs2, 0, vl); +} + +vuint32m4_t test_vsm3c_vi_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vsm3c_tu(vd, vs2, 0, vl); +} + +vuint32m8_t test_vsm3c_vi_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { + return __riscv_vsm3c_tu(vd, vs2, 0, vl); +} diff --git a/auto-generated/vector-crypto/policy_funcs/llvm-overloaded-tests/vsm3me.c b/auto-generated/vector-crypto/policy_funcs/llvm-overloaded-tests/vsm3me.c new file mode 100644 index 000000000..2fd5ab2ed --- /dev/null +++ b/auto-generated/vector-crypto/policy_funcs/llvm-overloaded-tests/vsm3me.c @@ -0,0 +1,38 @@ +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zvl512b \ +// RUN: -target-feature +zvbb \ +// RUN: -target-feature +zvbc \ +// RUN: -target-feature +zvkg \ +// RUN: -target-feature +zvkned \ +// RUN: -target-feature +zvknhb \ +// RUN: -target-feature +zvksed \ +// RUN: -target-feature +zvksh -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include + +vuint32mf2_t test_vsm3me_vv_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, + vuint32mf2_t vs1, size_t vl) { + return __riscv_vsm3me_tu(vd, vs2, vs1, vl); +} + +vuint32m1_t test_vsm3me_vv_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, + vuint32m1_t vs1, size_t vl) { + return __riscv_vsm3me_tu(vd, vs2, vs1, vl); +} + +vuint32m2_t test_vsm3me_vv_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, + vuint32m2_t vs1, size_t vl) { + return __riscv_vsm3me_tu(vd, vs2, vs1, vl); +} + +vuint32m4_t test_vsm3me_vv_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, + vuint32m4_t vs1, size_t vl) { + return __riscv_vsm3me_tu(vd, vs2, vs1, vl); +} + +vuint32m8_t test_vsm3me_vv_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, + vuint32m8_t vs1, size_t vl) { + return __riscv_vsm3me_tu(vd, vs2, vs1, vl); +} diff --git a/auto-generated/vector-crypto/policy_funcs/llvm-overloaded-tests/vsm4k.c b/auto-generated/vector-crypto/policy_funcs/llvm-overloaded-tests/vsm4k.c new file mode 100644 index 000000000..acf15ab27 --- /dev/null +++ b/auto-generated/vector-crypto/policy_funcs/llvm-overloaded-tests/vsm4k.c @@ -0,0 +1,34 @@ +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zvl512b \ +// RUN: -target-feature +zvbb \ +// RUN: -target-feature +zvbc \ +// RUN: 
-target-feature +zvkg \ +// RUN: -target-feature +zvkned \ +// RUN: -target-feature +zvknhb \ +// RUN: -target-feature +zvksed \ +// RUN: -target-feature +zvksh -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include + +vuint32mf2_t test_vsm4k_vi_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, + size_t vl) { + return __riscv_vsm4k_tu(vd, vs2, 0, vl); +} + +vuint32m1_t test_vsm4k_vi_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vsm4k_tu(vd, vs2, 0, vl); +} + +vuint32m2_t test_vsm4k_vi_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vsm4k_tu(vd, vs2, 0, vl); +} + +vuint32m4_t test_vsm4k_vi_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vsm4k_tu(vd, vs2, 0, vl); +} + +vuint32m8_t test_vsm4k_vi_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { + return __riscv_vsm4k_tu(vd, vs2, 0, vl); +} diff --git a/auto-generated/vector-crypto/policy_funcs/llvm-overloaded-tests/vsm4r.c b/auto-generated/vector-crypto/policy_funcs/llvm-overloaded-tests/vsm4r.c new file mode 100644 index 000000000..e8ba1fd59 --- /dev/null +++ b/auto-generated/vector-crypto/policy_funcs/llvm-overloaded-tests/vsm4r.c @@ -0,0 +1,104 @@ +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zvl512b \ +// RUN: -target-feature +zvbb \ +// RUN: -target-feature +zvbc \ +// RUN: -target-feature +zvkg \ +// RUN: -target-feature +zvkned \ +// RUN: -target-feature +zvknhb \ +// RUN: -target-feature +zvksed \ +// RUN: -target-feature +zvksh -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include + +vuint32mf2_t test_vsm4r_vv_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, + size_t vl) { + return __riscv_vsm4r_vv_tu(vd, vs2, vl); +} + +vuint32mf2_t test_vsm4r_vs_u32mf2_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, + size_t vl) { + return __riscv_vsm4r_vs_tu(vd, vs2, vl); +} + +vuint32m1_t test_vsm4r_vs_u32mf2_u32m1_tu(vuint32m1_t vd, vuint32mf2_t vs2, + size_t vl) { + return __riscv_vsm4r_vs_tu(vd, vs2, vl); +} + +vuint32m2_t test_vsm4r_vs_u32mf2_u32m2_tu(vuint32m2_t vd, vuint32mf2_t vs2, + size_t vl) { + return __riscv_vsm4r_vs_tu(vd, vs2, vl); +} + +vuint32m4_t test_vsm4r_vs_u32mf2_u32m4_tu(vuint32m4_t vd, vuint32mf2_t vs2, + size_t vl) { + return __riscv_vsm4r_vs_tu(vd, vs2, vl); +} + +vuint32m8_t test_vsm4r_vs_u32mf2_u32m8_tu(vuint32m8_t vd, vuint32mf2_t vs2, + size_t vl) { + return __riscv_vsm4r_vs_tu(vd, vs2, vl); +} + +vuint32m1_t test_vsm4r_vv_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vsm4r_vv_tu(vd, vs2, vl); +} + +vuint32m1_t test_vsm4r_vs_u32m1_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, + size_t vl) { + return __riscv_vsm4r_vs_tu(vd, vs2, vl); +} + +vuint32m2_t test_vsm4r_vs_u32m1_u32m2_tu(vuint32m2_t vd, vuint32m1_t vs2, + size_t vl) { + return __riscv_vsm4r_vs_tu(vd, vs2, vl); +} + +vuint32m4_t test_vsm4r_vs_u32m1_u32m4_tu(vuint32m4_t vd, vuint32m1_t vs2, + size_t vl) { + return __riscv_vsm4r_vs_tu(vd, vs2, vl); +} + +vuint32m8_t test_vsm4r_vs_u32m1_u32m8_tu(vuint32m8_t vd, vuint32m1_t vs2, + size_t vl) { + return __riscv_vsm4r_vs_tu(vd, vs2, vl); +} + +vuint32m2_t test_vsm4r_vv_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vsm4r_vv_tu(vd, vs2, vl); +} + +vuint32m2_t test_vsm4r_vs_u32m2_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, + size_t vl) { + return __riscv_vsm4r_vs_tu(vd, vs2, vl); 
+} + +vuint32m4_t test_vsm4r_vs_u32m2_u32m4_tu(vuint32m4_t vd, vuint32m2_t vs2, + size_t vl) { + return __riscv_vsm4r_vs_tu(vd, vs2, vl); +} + +vuint32m8_t test_vsm4r_vs_u32m2_u32m8_tu(vuint32m8_t vd, vuint32m2_t vs2, + size_t vl) { + return __riscv_vsm4r_vs_tu(vd, vs2, vl); +} + +vuint32m4_t test_vsm4r_vv_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vsm4r_vv_tu(vd, vs2, vl); +} + +vuint32m4_t test_vsm4r_vs_u32m4_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, + size_t vl) { + return __riscv_vsm4r_vs_tu(vd, vs2, vl); +} + +vuint32m8_t test_vsm4r_vs_u32m4_u32m8_tu(vuint32m8_t vd, vuint32m4_t vs2, + size_t vl) { + return __riscv_vsm4r_vs_tu(vd, vs2, vl); +} + +vuint32m8_t test_vsm4r_vv_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { + return __riscv_vsm4r_vv_tu(vd, vs2, vl); +} diff --git a/auto-generated/vector-crypto/policy_funcs/llvm-overloaded-tests/vwsll.c b/auto-generated/vector-crypto/policy_funcs/llvm-overloaded-tests/vwsll.c new file mode 100644 index 000000000..49b8ee5a0 --- /dev/null +++ b/auto-generated/vector-crypto/policy_funcs/llvm-overloaded-tests/vwsll.c @@ -0,0 +1,650 @@ +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zvl512b \ +// RUN: -target-feature +zvbb \ +// RUN: -target-feature +zvbc \ +// RUN: -target-feature +zvkg \ +// RUN: -target-feature +zvkned \ +// RUN: -target-feature +zvknhb \ +// RUN: -target-feature +zvksed \ +// RUN: -target-feature +zvksh -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include + +vuint16mf4_t test_vwsll_vv_u16mf4_tu(vuint16mf4_t vd, vuint8mf8_t vs2, + vuint8mf8_t vs1, size_t vl) { + return __riscv_vwsll_tu(vd, vs2, vs1, vl); +} + +vuint16mf4_t test_vwsll_vx_u16mf4_tu(vuint16mf4_t vd, vuint8mf8_t vs2, + size_t rs1, size_t vl) { + return __riscv_vwsll_tu(vd, vs2, rs1, vl); +} + +vuint16mf2_t test_vwsll_vv_u16mf2_tu(vuint16mf2_t vd, vuint8mf4_t vs2, + vuint8mf4_t vs1, size_t vl) { + return __riscv_vwsll_tu(vd, vs2, vs1, vl); +} + +vuint16mf2_t test_vwsll_vx_u16mf2_tu(vuint16mf2_t vd, vuint8mf4_t vs2, + size_t rs1, size_t vl) { + return __riscv_vwsll_tu(vd, vs2, rs1, vl); +} + +vuint16m1_t test_vwsll_vv_u16m1_tu(vuint16m1_t vd, vuint8mf2_t vs2, + vuint8mf2_t vs1, size_t vl) { + return __riscv_vwsll_tu(vd, vs2, vs1, vl); +} + +vuint16m1_t test_vwsll_vx_u16m1_tu(vuint16m1_t vd, vuint8mf2_t vs2, size_t rs1, + size_t vl) { + return __riscv_vwsll_tu(vd, vs2, rs1, vl); +} + +vuint16m2_t test_vwsll_vv_u16m2_tu(vuint16m2_t vd, vuint8m1_t vs2, + vuint8m1_t vs1, size_t vl) { + return __riscv_vwsll_tu(vd, vs2, vs1, vl); +} + +vuint16m2_t test_vwsll_vx_u16m2_tu(vuint16m2_t vd, vuint8m1_t vs2, size_t rs1, + size_t vl) { + return __riscv_vwsll_tu(vd, vs2, rs1, vl); +} + +vuint16m4_t test_vwsll_vv_u16m4_tu(vuint16m4_t vd, vuint8m2_t vs2, + vuint8m2_t vs1, size_t vl) { + return __riscv_vwsll_tu(vd, vs2, vs1, vl); +} + +vuint16m4_t test_vwsll_vx_u16m4_tu(vuint16m4_t vd, vuint8m2_t vs2, size_t rs1, + size_t vl) { + return __riscv_vwsll_tu(vd, vs2, rs1, vl); +} + +vuint16m8_t test_vwsll_vv_u16m8_tu(vuint16m8_t vd, vuint8m4_t vs2, + vuint8m4_t vs1, size_t vl) { + return __riscv_vwsll_tu(vd, vs2, vs1, vl); +} + +vuint16m8_t test_vwsll_vx_u16m8_tu(vuint16m8_t vd, vuint8m4_t vs2, size_t rs1, + size_t vl) { + return __riscv_vwsll_tu(vd, vs2, rs1, vl); +} + +vuint32mf2_t test_vwsll_vv_u32mf2_tu(vuint32mf2_t vd, vuint16mf4_t vs2, + vuint16mf4_t vs1, size_t vl) { + return __riscv_vwsll_tu(vd, vs2, 
vs1, vl); +} + +vuint32mf2_t test_vwsll_vx_u32mf2_tu(vuint32mf2_t vd, vuint16mf4_t vs2, + size_t rs1, size_t vl) { + return __riscv_vwsll_tu(vd, vs2, rs1, vl); +} + +vuint32m1_t test_vwsll_vv_u32m1_tu(vuint32m1_t vd, vuint16mf2_t vs2, + vuint16mf2_t vs1, size_t vl) { + return __riscv_vwsll_tu(vd, vs2, vs1, vl); +} + +vuint32m1_t test_vwsll_vx_u32m1_tu(vuint32m1_t vd, vuint16mf2_t vs2, size_t rs1, + size_t vl) { + return __riscv_vwsll_tu(vd, vs2, rs1, vl); +} + +vuint32m2_t test_vwsll_vv_u32m2_tu(vuint32m2_t vd, vuint16m1_t vs2, + vuint16m1_t vs1, size_t vl) { + return __riscv_vwsll_tu(vd, vs2, vs1, vl); +} + +vuint32m2_t test_vwsll_vx_u32m2_tu(vuint32m2_t vd, vuint16m1_t vs2, size_t rs1, + size_t vl) { + return __riscv_vwsll_tu(vd, vs2, rs1, vl); +} + +vuint32m4_t test_vwsll_vv_u32m4_tu(vuint32m4_t vd, vuint16m2_t vs2, + vuint16m2_t vs1, size_t vl) { + return __riscv_vwsll_tu(vd, vs2, vs1, vl); +} + +vuint32m4_t test_vwsll_vx_u32m4_tu(vuint32m4_t vd, vuint16m2_t vs2, size_t rs1, + size_t vl) { + return __riscv_vwsll_tu(vd, vs2, rs1, vl); +} + +vuint32m8_t test_vwsll_vv_u32m8_tu(vuint32m8_t vd, vuint16m4_t vs2, + vuint16m4_t vs1, size_t vl) { + return __riscv_vwsll_tu(vd, vs2, vs1, vl); +} + +vuint32m8_t test_vwsll_vx_u32m8_tu(vuint32m8_t vd, vuint16m4_t vs2, size_t rs1, + size_t vl) { + return __riscv_vwsll_tu(vd, vs2, rs1, vl); +} + +vuint64m1_t test_vwsll_vv_u64m1_tu(vuint64m1_t vd, vuint32mf2_t vs2, + vuint32mf2_t vs1, size_t vl) { + return __riscv_vwsll_tu(vd, vs2, vs1, vl); +} + +vuint64m1_t test_vwsll_vx_u64m1_tu(vuint64m1_t vd, vuint32mf2_t vs2, size_t rs1, + size_t vl) { + return __riscv_vwsll_tu(vd, vs2, rs1, vl); +} + +vuint64m2_t test_vwsll_vv_u64m2_tu(vuint64m2_t vd, vuint32m1_t vs2, + vuint32m1_t vs1, size_t vl) { + return __riscv_vwsll_tu(vd, vs2, vs1, vl); +} + +vuint64m2_t test_vwsll_vx_u64m2_tu(vuint64m2_t vd, vuint32m1_t vs2, size_t rs1, + size_t vl) { + return __riscv_vwsll_tu(vd, vs2, rs1, vl); +} + +vuint64m4_t test_vwsll_vv_u64m4_tu(vuint64m4_t vd, vuint32m2_t vs2, + vuint32m2_t vs1, size_t vl) { + return __riscv_vwsll_tu(vd, vs2, vs1, vl); +} + +vuint64m4_t test_vwsll_vx_u64m4_tu(vuint64m4_t vd, vuint32m2_t vs2, size_t rs1, + size_t vl) { + return __riscv_vwsll_tu(vd, vs2, rs1, vl); +} + +vuint64m8_t test_vwsll_vv_u64m8_tu(vuint64m8_t vd, vuint32m4_t vs2, + vuint32m4_t vs1, size_t vl) { + return __riscv_vwsll_tu(vd, vs2, vs1, vl); +} + +vuint64m8_t test_vwsll_vx_u64m8_tu(vuint64m8_t vd, vuint32m4_t vs2, size_t rs1, + size_t vl) { + return __riscv_vwsll_tu(vd, vs2, rs1, vl); +} + +vuint16mf4_t test_vwsll_vv_u16mf4_tum(vbool64_t vm, vuint16mf4_t vd, + vuint8mf8_t vs2, vuint8mf8_t vs1, + size_t vl) { + return __riscv_vwsll_tum(vm, vd, vs2, vs1, vl); +} + +vuint16mf4_t test_vwsll_vx_u16mf4_tum(vbool64_t vm, vuint16mf4_t vd, + vuint8mf8_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_tum(vm, vd, vs2, rs1, vl); +} + +vuint16mf2_t test_vwsll_vv_u16mf2_tum(vbool32_t vm, vuint16mf2_t vd, + vuint8mf4_t vs2, vuint8mf4_t vs1, + size_t vl) { + return __riscv_vwsll_tum(vm, vd, vs2, vs1, vl); +} + +vuint16mf2_t test_vwsll_vx_u16mf2_tum(vbool32_t vm, vuint16mf2_t vd, + vuint8mf4_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_tum(vm, vd, vs2, rs1, vl); +} + +vuint16m1_t test_vwsll_vv_u16m1_tum(vbool16_t vm, vuint16m1_t vd, + vuint8mf2_t vs2, vuint8mf2_t vs1, + size_t vl) { + return __riscv_vwsll_tum(vm, vd, vs2, vs1, vl); +} + +vuint16m1_t test_vwsll_vx_u16m1_tum(vbool16_t vm, vuint16m1_t vd, + vuint8mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_tum(vm, 
vd, vs2, rs1, vl); +} + +vuint16m2_t test_vwsll_vv_u16m2_tum(vbool8_t vm, vuint16m2_t vd, vuint8m1_t vs2, + vuint8m1_t vs1, size_t vl) { + return __riscv_vwsll_tum(vm, vd, vs2, vs1, vl); +} + +vuint16m2_t test_vwsll_vx_u16m2_tum(vbool8_t vm, vuint16m2_t vd, vuint8m1_t vs2, + size_t rs1, size_t vl) { + return __riscv_vwsll_tum(vm, vd, vs2, rs1, vl); +} + +vuint16m4_t test_vwsll_vv_u16m4_tum(vbool4_t vm, vuint16m4_t vd, vuint8m2_t vs2, + vuint8m2_t vs1, size_t vl) { + return __riscv_vwsll_tum(vm, vd, vs2, vs1, vl); +} + +vuint16m4_t test_vwsll_vx_u16m4_tum(vbool4_t vm, vuint16m4_t vd, vuint8m2_t vs2, + size_t rs1, size_t vl) { + return __riscv_vwsll_tum(vm, vd, vs2, rs1, vl); +} + +vuint16m8_t test_vwsll_vv_u16m8_tum(vbool2_t vm, vuint16m8_t vd, vuint8m4_t vs2, + vuint8m4_t vs1, size_t vl) { + return __riscv_vwsll_tum(vm, vd, vs2, vs1, vl); +} + +vuint16m8_t test_vwsll_vx_u16m8_tum(vbool2_t vm, vuint16m8_t vd, vuint8m4_t vs2, + size_t rs1, size_t vl) { + return __riscv_vwsll_tum(vm, vd, vs2, rs1, vl); +} + +vuint32mf2_t test_vwsll_vv_u32mf2_tum(vbool64_t vm, vuint32mf2_t vd, + vuint16mf4_t vs2, vuint16mf4_t vs1, + size_t vl) { + return __riscv_vwsll_tum(vm, vd, vs2, vs1, vl); +} + +vuint32mf2_t test_vwsll_vx_u32mf2_tum(vbool64_t vm, vuint32mf2_t vd, + vuint16mf4_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_tum(vm, vd, vs2, rs1, vl); +} + +vuint32m1_t test_vwsll_vv_u32m1_tum(vbool32_t vm, vuint32m1_t vd, + vuint16mf2_t vs2, vuint16mf2_t vs1, + size_t vl) { + return __riscv_vwsll_tum(vm, vd, vs2, vs1, vl); +} + +vuint32m1_t test_vwsll_vx_u32m1_tum(vbool32_t vm, vuint32m1_t vd, + vuint16mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_tum(vm, vd, vs2, rs1, vl); +} + +vuint32m2_t test_vwsll_vv_u32m2_tum(vbool16_t vm, vuint32m2_t vd, + vuint16m1_t vs2, vuint16m1_t vs1, + size_t vl) { + return __riscv_vwsll_tum(vm, vd, vs2, vs1, vl); +} + +vuint32m2_t test_vwsll_vx_u32m2_tum(vbool16_t vm, vuint32m2_t vd, + vuint16m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_tum(vm, vd, vs2, rs1, vl); +} + +vuint32m4_t test_vwsll_vv_u32m4_tum(vbool8_t vm, vuint32m4_t vd, + vuint16m2_t vs2, vuint16m2_t vs1, + size_t vl) { + return __riscv_vwsll_tum(vm, vd, vs2, vs1, vl); +} + +vuint32m4_t test_vwsll_vx_u32m4_tum(vbool8_t vm, vuint32m4_t vd, + vuint16m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_tum(vm, vd, vs2, rs1, vl); +} + +vuint32m8_t test_vwsll_vv_u32m8_tum(vbool4_t vm, vuint32m8_t vd, + vuint16m4_t vs2, vuint16m4_t vs1, + size_t vl) { + return __riscv_vwsll_tum(vm, vd, vs2, vs1, vl); +} + +vuint32m8_t test_vwsll_vx_u32m8_tum(vbool4_t vm, vuint32m8_t vd, + vuint16m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_tum(vm, vd, vs2, rs1, vl); +} + +vuint64m1_t test_vwsll_vv_u64m1_tum(vbool64_t vm, vuint64m1_t vd, + vuint32mf2_t vs2, vuint32mf2_t vs1, + size_t vl) { + return __riscv_vwsll_tum(vm, vd, vs2, vs1, vl); +} + +vuint64m1_t test_vwsll_vx_u64m1_tum(vbool64_t vm, vuint64m1_t vd, + vuint32mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_tum(vm, vd, vs2, rs1, vl); +} + +vuint64m2_t test_vwsll_vv_u64m2_tum(vbool32_t vm, vuint64m2_t vd, + vuint32m1_t vs2, vuint32m1_t vs1, + size_t vl) { + return __riscv_vwsll_tum(vm, vd, vs2, vs1, vl); +} + +vuint64m2_t test_vwsll_vx_u64m2_tum(vbool32_t vm, vuint64m2_t vd, + vuint32m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_tum(vm, vd, vs2, rs1, vl); +} + +vuint64m4_t test_vwsll_vv_u64m4_tum(vbool16_t vm, vuint64m4_t vd, + vuint32m2_t vs2, vuint32m2_t vs1, + size_t vl) { + return __riscv_vwsll_tum(vm, 
vd, vs2, vs1, vl); +} + +vuint64m4_t test_vwsll_vx_u64m4_tum(vbool16_t vm, vuint64m4_t vd, + vuint32m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_tum(vm, vd, vs2, rs1, vl); +} + +vuint64m8_t test_vwsll_vv_u64m8_tum(vbool8_t vm, vuint64m8_t vd, + vuint32m4_t vs2, vuint32m4_t vs1, + size_t vl) { + return __riscv_vwsll_tum(vm, vd, vs2, vs1, vl); +} + +vuint64m8_t test_vwsll_vx_u64m8_tum(vbool8_t vm, vuint64m8_t vd, + vuint32m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_tum(vm, vd, vs2, rs1, vl); +} + +vuint16mf4_t test_vwsll_vv_u16mf4_tumu(vbool64_t vm, vuint16mf4_t vd, + vuint8mf8_t vs2, vuint8mf8_t vs1, + size_t vl) { + return __riscv_vwsll_tumu(vm, vd, vs2, vs1, vl); +} + +vuint16mf4_t test_vwsll_vx_u16mf4_tumu(vbool64_t vm, vuint16mf4_t vd, + vuint8mf8_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_tumu(vm, vd, vs2, rs1, vl); +} + +vuint16mf2_t test_vwsll_vv_u16mf2_tumu(vbool32_t vm, vuint16mf2_t vd, + vuint8mf4_t vs2, vuint8mf4_t vs1, + size_t vl) { + return __riscv_vwsll_tumu(vm, vd, vs2, vs1, vl); +} + +vuint16mf2_t test_vwsll_vx_u16mf2_tumu(vbool32_t vm, vuint16mf2_t vd, + vuint8mf4_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_tumu(vm, vd, vs2, rs1, vl); +} + +vuint16m1_t test_vwsll_vv_u16m1_tumu(vbool16_t vm, vuint16m1_t vd, + vuint8mf2_t vs2, vuint8mf2_t vs1, + size_t vl) { + return __riscv_vwsll_tumu(vm, vd, vs2, vs1, vl); +} + +vuint16m1_t test_vwsll_vx_u16m1_tumu(vbool16_t vm, vuint16m1_t vd, + vuint8mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_tumu(vm, vd, vs2, rs1, vl); +} + +vuint16m2_t test_vwsll_vv_u16m2_tumu(vbool8_t vm, vuint16m2_t vd, + vuint8m1_t vs2, vuint8m1_t vs1, + size_t vl) { + return __riscv_vwsll_tumu(vm, vd, vs2, vs1, vl); +} + +vuint16m2_t test_vwsll_vx_u16m2_tumu(vbool8_t vm, vuint16m2_t vd, + vuint8m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_tumu(vm, vd, vs2, rs1, vl); +} + +vuint16m4_t test_vwsll_vv_u16m4_tumu(vbool4_t vm, vuint16m4_t vd, + vuint8m2_t vs2, vuint8m2_t vs1, + size_t vl) { + return __riscv_vwsll_tumu(vm, vd, vs2, vs1, vl); +} + +vuint16m4_t test_vwsll_vx_u16m4_tumu(vbool4_t vm, vuint16m4_t vd, + vuint8m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_tumu(vm, vd, vs2, rs1, vl); +} + +vuint16m8_t test_vwsll_vv_u16m8_tumu(vbool2_t vm, vuint16m8_t vd, + vuint8m4_t vs2, vuint8m4_t vs1, + size_t vl) { + return __riscv_vwsll_tumu(vm, vd, vs2, vs1, vl); +} + +vuint16m8_t test_vwsll_vx_u16m8_tumu(vbool2_t vm, vuint16m8_t vd, + vuint8m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_tumu(vm, vd, vs2, rs1, vl); +} + +vuint32mf2_t test_vwsll_vv_u32mf2_tumu(vbool64_t vm, vuint32mf2_t vd, + vuint16mf4_t vs2, vuint16mf4_t vs1, + size_t vl) { + return __riscv_vwsll_tumu(vm, vd, vs2, vs1, vl); +} + +vuint32mf2_t test_vwsll_vx_u32mf2_tumu(vbool64_t vm, vuint32mf2_t vd, + vuint16mf4_t vs2, size_t rs1, + size_t vl) { + return __riscv_vwsll_tumu(vm, vd, vs2, rs1, vl); +} + +vuint32m1_t test_vwsll_vv_u32m1_tumu(vbool32_t vm, vuint32m1_t vd, + vuint16mf2_t vs2, vuint16mf2_t vs1, + size_t vl) { + return __riscv_vwsll_tumu(vm, vd, vs2, vs1, vl); +} + +vuint32m1_t test_vwsll_vx_u32m1_tumu(vbool32_t vm, vuint32m1_t vd, + vuint16mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_tumu(vm, vd, vs2, rs1, vl); +} + +vuint32m2_t test_vwsll_vv_u32m2_tumu(vbool16_t vm, vuint32m2_t vd, + vuint16m1_t vs2, vuint16m1_t vs1, + size_t vl) { + return __riscv_vwsll_tumu(vm, vd, vs2, vs1, vl); +} + +vuint32m2_t test_vwsll_vx_u32m2_tumu(vbool16_t vm, vuint32m2_t vd, + vuint16m1_t vs2, size_t 
rs1, size_t vl) { + return __riscv_vwsll_tumu(vm, vd, vs2, rs1, vl); +} + +vuint32m4_t test_vwsll_vv_u32m4_tumu(vbool8_t vm, vuint32m4_t vd, + vuint16m2_t vs2, vuint16m2_t vs1, + size_t vl) { + return __riscv_vwsll_tumu(vm, vd, vs2, vs1, vl); +} + +vuint32m4_t test_vwsll_vx_u32m4_tumu(vbool8_t vm, vuint32m4_t vd, + vuint16m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_tumu(vm, vd, vs2, rs1, vl); +} + +vuint32m8_t test_vwsll_vv_u32m8_tumu(vbool4_t vm, vuint32m8_t vd, + vuint16m4_t vs2, vuint16m4_t vs1, + size_t vl) { + return __riscv_vwsll_tumu(vm, vd, vs2, vs1, vl); +} + +vuint32m8_t test_vwsll_vx_u32m8_tumu(vbool4_t vm, vuint32m8_t vd, + vuint16m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_tumu(vm, vd, vs2, rs1, vl); +} + +vuint64m1_t test_vwsll_vv_u64m1_tumu(vbool64_t vm, vuint64m1_t vd, + vuint32mf2_t vs2, vuint32mf2_t vs1, + size_t vl) { + return __riscv_vwsll_tumu(vm, vd, vs2, vs1, vl); +} + +vuint64m1_t test_vwsll_vx_u64m1_tumu(vbool64_t vm, vuint64m1_t vd, + vuint32mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_tumu(vm, vd, vs2, rs1, vl); +} + +vuint64m2_t test_vwsll_vv_u64m2_tumu(vbool32_t vm, vuint64m2_t vd, + vuint32m1_t vs2, vuint32m1_t vs1, + size_t vl) { + return __riscv_vwsll_tumu(vm, vd, vs2, vs1, vl); +} + +vuint64m2_t test_vwsll_vx_u64m2_tumu(vbool32_t vm, vuint64m2_t vd, + vuint32m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_tumu(vm, vd, vs2, rs1, vl); +} + +vuint64m4_t test_vwsll_vv_u64m4_tumu(vbool16_t vm, vuint64m4_t vd, + vuint32m2_t vs2, vuint32m2_t vs1, + size_t vl) { + return __riscv_vwsll_tumu(vm, vd, vs2, vs1, vl); +} + +vuint64m4_t test_vwsll_vx_u64m4_tumu(vbool16_t vm, vuint64m4_t vd, + vuint32m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_tumu(vm, vd, vs2, rs1, vl); +} + +vuint64m8_t test_vwsll_vv_u64m8_tumu(vbool8_t vm, vuint64m8_t vd, + vuint32m4_t vs2, vuint32m4_t vs1, + size_t vl) { + return __riscv_vwsll_tumu(vm, vd, vs2, vs1, vl); +} + +vuint64m8_t test_vwsll_vx_u64m8_tumu(vbool8_t vm, vuint64m8_t vd, + vuint32m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_tumu(vm, vd, vs2, rs1, vl); +} + +vuint16mf4_t test_vwsll_vv_u16mf4_mu(vbool64_t vm, vuint16mf4_t vd, + vuint8mf8_t vs2, vuint8mf8_t vs1, + size_t vl) { + return __riscv_vwsll_mu(vm, vd, vs2, vs1, vl); +} + +vuint16mf4_t test_vwsll_vx_u16mf4_mu(vbool64_t vm, vuint16mf4_t vd, + vuint8mf8_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_mu(vm, vd, vs2, rs1, vl); +} + +vuint16mf2_t test_vwsll_vv_u16mf2_mu(vbool32_t vm, vuint16mf2_t vd, + vuint8mf4_t vs2, vuint8mf4_t vs1, + size_t vl) { + return __riscv_vwsll_mu(vm, vd, vs2, vs1, vl); +} + +vuint16mf2_t test_vwsll_vx_u16mf2_mu(vbool32_t vm, vuint16mf2_t vd, + vuint8mf4_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_mu(vm, vd, vs2, rs1, vl); +} + +vuint16m1_t test_vwsll_vv_u16m1_mu(vbool16_t vm, vuint16m1_t vd, + vuint8mf2_t vs2, vuint8mf2_t vs1, + size_t vl) { + return __riscv_vwsll_mu(vm, vd, vs2, vs1, vl); +} + +vuint16m1_t test_vwsll_vx_u16m1_mu(vbool16_t vm, vuint16m1_t vd, + vuint8mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_mu(vm, vd, vs2, rs1, vl); +} + +vuint16m2_t test_vwsll_vv_u16m2_mu(vbool8_t vm, vuint16m2_t vd, vuint8m1_t vs2, + vuint8m1_t vs1, size_t vl) { + return __riscv_vwsll_mu(vm, vd, vs2, vs1, vl); +} + +vuint16m2_t test_vwsll_vx_u16m2_mu(vbool8_t vm, vuint16m2_t vd, vuint8m1_t vs2, + size_t rs1, size_t vl) { + return __riscv_vwsll_mu(vm, vd, vs2, rs1, vl); +} + +vuint16m4_t test_vwsll_vv_u16m4_mu(vbool4_t vm, vuint16m4_t vd, vuint8m2_t vs2, 
+ vuint8m2_t vs1, size_t vl) { + return __riscv_vwsll_mu(vm, vd, vs2, vs1, vl); +} + +vuint16m4_t test_vwsll_vx_u16m4_mu(vbool4_t vm, vuint16m4_t vd, vuint8m2_t vs2, + size_t rs1, size_t vl) { + return __riscv_vwsll_mu(vm, vd, vs2, rs1, vl); +} + +vuint16m8_t test_vwsll_vv_u16m8_mu(vbool2_t vm, vuint16m8_t vd, vuint8m4_t vs2, + vuint8m4_t vs1, size_t vl) { + return __riscv_vwsll_mu(vm, vd, vs2, vs1, vl); +} + +vuint16m8_t test_vwsll_vx_u16m8_mu(vbool2_t vm, vuint16m8_t vd, vuint8m4_t vs2, + size_t rs1, size_t vl) { + return __riscv_vwsll_mu(vm, vd, vs2, rs1, vl); +} + +vuint32mf2_t test_vwsll_vv_u32mf2_mu(vbool64_t vm, vuint32mf2_t vd, + vuint16mf4_t vs2, vuint16mf4_t vs1, + size_t vl) { + return __riscv_vwsll_mu(vm, vd, vs2, vs1, vl); +} + +vuint32mf2_t test_vwsll_vx_u32mf2_mu(vbool64_t vm, vuint32mf2_t vd, + vuint16mf4_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_mu(vm, vd, vs2, rs1, vl); +} + +vuint32m1_t test_vwsll_vv_u32m1_mu(vbool32_t vm, vuint32m1_t vd, + vuint16mf2_t vs2, vuint16mf2_t vs1, + size_t vl) { + return __riscv_vwsll_mu(vm, vd, vs2, vs1, vl); +} + +vuint32m1_t test_vwsll_vx_u32m1_mu(vbool32_t vm, vuint32m1_t vd, + vuint16mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_mu(vm, vd, vs2, rs1, vl); +} + +vuint32m2_t test_vwsll_vv_u32m2_mu(vbool16_t vm, vuint32m2_t vd, + vuint16m1_t vs2, vuint16m1_t vs1, + size_t vl) { + return __riscv_vwsll_mu(vm, vd, vs2, vs1, vl); +} + +vuint32m2_t test_vwsll_vx_u32m2_mu(vbool16_t vm, vuint32m2_t vd, + vuint16m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_mu(vm, vd, vs2, rs1, vl); +} + +vuint32m4_t test_vwsll_vv_u32m4_mu(vbool8_t vm, vuint32m4_t vd, vuint16m2_t vs2, + vuint16m2_t vs1, size_t vl) { + return __riscv_vwsll_mu(vm, vd, vs2, vs1, vl); +} + +vuint32m4_t test_vwsll_vx_u32m4_mu(vbool8_t vm, vuint32m4_t vd, vuint16m2_t vs2, + size_t rs1, size_t vl) { + return __riscv_vwsll_mu(vm, vd, vs2, rs1, vl); +} + +vuint32m8_t test_vwsll_vv_u32m8_mu(vbool4_t vm, vuint32m8_t vd, vuint16m4_t vs2, + vuint16m4_t vs1, size_t vl) { + return __riscv_vwsll_mu(vm, vd, vs2, vs1, vl); +} + +vuint32m8_t test_vwsll_vx_u32m8_mu(vbool4_t vm, vuint32m8_t vd, vuint16m4_t vs2, + size_t rs1, size_t vl) { + return __riscv_vwsll_mu(vm, vd, vs2, rs1, vl); +} + +vuint64m1_t test_vwsll_vv_u64m1_mu(vbool64_t vm, vuint64m1_t vd, + vuint32mf2_t vs2, vuint32mf2_t vs1, + size_t vl) { + return __riscv_vwsll_mu(vm, vd, vs2, vs1, vl); +} + +vuint64m1_t test_vwsll_vx_u64m1_mu(vbool64_t vm, vuint64m1_t vd, + vuint32mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_mu(vm, vd, vs2, rs1, vl); +} + +vuint64m2_t test_vwsll_vv_u64m2_mu(vbool32_t vm, vuint64m2_t vd, + vuint32m1_t vs2, vuint32m1_t vs1, + size_t vl) { + return __riscv_vwsll_mu(vm, vd, vs2, vs1, vl); +} + +vuint64m2_t test_vwsll_vx_u64m2_mu(vbool32_t vm, vuint64m2_t vd, + vuint32m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_mu(vm, vd, vs2, rs1, vl); +} + +vuint64m4_t test_vwsll_vv_u64m4_mu(vbool16_t vm, vuint64m4_t vd, + vuint32m2_t vs2, vuint32m2_t vs1, + size_t vl) { + return __riscv_vwsll_mu(vm, vd, vs2, vs1, vl); +} + +vuint64m4_t test_vwsll_vx_u64m4_mu(vbool16_t vm, vuint64m4_t vd, + vuint32m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_mu(vm, vd, vs2, rs1, vl); +} + +vuint64m8_t test_vwsll_vv_u64m8_mu(vbool8_t vm, vuint64m8_t vd, vuint32m4_t vs2, + vuint32m4_t vs1, size_t vl) { + return __riscv_vwsll_mu(vm, vd, vs2, vs1, vl); +} + +vuint64m8_t test_vwsll_vx_u64m8_mu(vbool8_t vm, vuint64m8_t vd, vuint32m4_t vs2, + size_t rs1, size_t vl) { + return 
__riscv_vwsll_mu(vm, vd, vs2, rs1, vl); +} diff --git a/auto-generated/vector-crypto/policy_funcs/overloaded-api-testing/vaesdf.c b/auto-generated/vector-crypto/policy_funcs/overloaded-api-testing/vaesdf.c new file mode 100644 index 000000000..1c1f98128 --- /dev/null +++ b/auto-generated/vector-crypto/policy_funcs/overloaded-api-testing/vaesdf.c @@ -0,0 +1,97 @@ +#include +#include + +vuint32mf2_t test_vaesdf_vv_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, + size_t vl) { + return __riscv_vaesdf_vv_tu(vd, vs2, vl); +} + +vuint32mf2_t test_vaesdf_vs_u32mf2_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, + size_t vl) { + return __riscv_vaesdf_vs_tu(vd, vs2, vl); +} + +vuint32m1_t test_vaesdf_vs_u32mf2_u32m1_tu(vuint32m1_t vd, vuint32mf2_t vs2, + size_t vl) { + return __riscv_vaesdf_vs_tu(vd, vs2, vl); +} + +vuint32m2_t test_vaesdf_vs_u32mf2_u32m2_tu(vuint32m2_t vd, vuint32mf2_t vs2, + size_t vl) { + return __riscv_vaesdf_vs_tu(vd, vs2, vl); +} + +vuint32m4_t test_vaesdf_vs_u32mf2_u32m4_tu(vuint32m4_t vd, vuint32mf2_t vs2, + size_t vl) { + return __riscv_vaesdf_vs_tu(vd, vs2, vl); +} + +vuint32m8_t test_vaesdf_vs_u32mf2_u32m8_tu(vuint32m8_t vd, vuint32mf2_t vs2, + size_t vl) { + return __riscv_vaesdf_vs_tu(vd, vs2, vl); +} + +vuint32m1_t test_vaesdf_vv_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, + size_t vl) { + return __riscv_vaesdf_vv_tu(vd, vs2, vl); +} + +vuint32m1_t test_vaesdf_vs_u32m1_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, + size_t vl) { + return __riscv_vaesdf_vs_tu(vd, vs2, vl); +} + +vuint32m2_t test_vaesdf_vs_u32m1_u32m2_tu(vuint32m2_t vd, vuint32m1_t vs2, + size_t vl) { + return __riscv_vaesdf_vs_tu(vd, vs2, vl); +} + +vuint32m4_t test_vaesdf_vs_u32m1_u32m4_tu(vuint32m4_t vd, vuint32m1_t vs2, + size_t vl) { + return __riscv_vaesdf_vs_tu(vd, vs2, vl); +} + +vuint32m8_t test_vaesdf_vs_u32m1_u32m8_tu(vuint32m8_t vd, vuint32m1_t vs2, + size_t vl) { + return __riscv_vaesdf_vs_tu(vd, vs2, vl); +} + +vuint32m2_t test_vaesdf_vv_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, + size_t vl) { + return __riscv_vaesdf_vv_tu(vd, vs2, vl); +} + +vuint32m2_t test_vaesdf_vs_u32m2_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, + size_t vl) { + return __riscv_vaesdf_vs_tu(vd, vs2, vl); +} + +vuint32m4_t test_vaesdf_vs_u32m2_u32m4_tu(vuint32m4_t vd, vuint32m2_t vs2, + size_t vl) { + return __riscv_vaesdf_vs_tu(vd, vs2, vl); +} + +vuint32m8_t test_vaesdf_vs_u32m2_u32m8_tu(vuint32m8_t vd, vuint32m2_t vs2, + size_t vl) { + return __riscv_vaesdf_vs_tu(vd, vs2, vl); +} + +vuint32m4_t test_vaesdf_vv_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, + size_t vl) { + return __riscv_vaesdf_vv_tu(vd, vs2, vl); +} + +vuint32m4_t test_vaesdf_vs_u32m4_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, + size_t vl) { + return __riscv_vaesdf_vs_tu(vd, vs2, vl); +} + +vuint32m8_t test_vaesdf_vs_u32m4_u32m8_tu(vuint32m8_t vd, vuint32m4_t vs2, + size_t vl) { + return __riscv_vaesdf_vs_tu(vd, vs2, vl); +} + +vuint32m8_t test_vaesdf_vv_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, + size_t vl) { + return __riscv_vaesdf_vv_tu(vd, vs2, vl); +} diff --git a/auto-generated/vector-crypto/policy_funcs/overloaded-api-testing/vaesdm.c b/auto-generated/vector-crypto/policy_funcs/overloaded-api-testing/vaesdm.c new file mode 100644 index 000000000..2eb7f3517 --- /dev/null +++ b/auto-generated/vector-crypto/policy_funcs/overloaded-api-testing/vaesdm.c @@ -0,0 +1,97 @@ +#include +#include + +vuint32mf2_t test_vaesdm_vv_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, + size_t vl) { + return __riscv_vaesdm_vv_tu(vd, vs2, vl); +} + +vuint32mf2_t 
test_vaesdm_vs_u32mf2_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, + size_t vl) { + return __riscv_vaesdm_vs_tu(vd, vs2, vl); +} + +vuint32m1_t test_vaesdm_vs_u32mf2_u32m1_tu(vuint32m1_t vd, vuint32mf2_t vs2, + size_t vl) { + return __riscv_vaesdm_vs_tu(vd, vs2, vl); +} + +vuint32m2_t test_vaesdm_vs_u32mf2_u32m2_tu(vuint32m2_t vd, vuint32mf2_t vs2, + size_t vl) { + return __riscv_vaesdm_vs_tu(vd, vs2, vl); +} + +vuint32m4_t test_vaesdm_vs_u32mf2_u32m4_tu(vuint32m4_t vd, vuint32mf2_t vs2, + size_t vl) { + return __riscv_vaesdm_vs_tu(vd, vs2, vl); +} + +vuint32m8_t test_vaesdm_vs_u32mf2_u32m8_tu(vuint32m8_t vd, vuint32mf2_t vs2, + size_t vl) { + return __riscv_vaesdm_vs_tu(vd, vs2, vl); +} + +vuint32m1_t test_vaesdm_vv_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, + size_t vl) { + return __riscv_vaesdm_vv_tu(vd, vs2, vl); +} + +vuint32m1_t test_vaesdm_vs_u32m1_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, + size_t vl) { + return __riscv_vaesdm_vs_tu(vd, vs2, vl); +} + +vuint32m2_t test_vaesdm_vs_u32m1_u32m2_tu(vuint32m2_t vd, vuint32m1_t vs2, + size_t vl) { + return __riscv_vaesdm_vs_tu(vd, vs2, vl); +} + +vuint32m4_t test_vaesdm_vs_u32m1_u32m4_tu(vuint32m4_t vd, vuint32m1_t vs2, + size_t vl) { + return __riscv_vaesdm_vs_tu(vd, vs2, vl); +} + +vuint32m8_t test_vaesdm_vs_u32m1_u32m8_tu(vuint32m8_t vd, vuint32m1_t vs2, + size_t vl) { + return __riscv_vaesdm_vs_tu(vd, vs2, vl); +} + +vuint32m2_t test_vaesdm_vv_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, + size_t vl) { + return __riscv_vaesdm_vv_tu(vd, vs2, vl); +} + +vuint32m2_t test_vaesdm_vs_u32m2_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, + size_t vl) { + return __riscv_vaesdm_vs_tu(vd, vs2, vl); +} + +vuint32m4_t test_vaesdm_vs_u32m2_u32m4_tu(vuint32m4_t vd, vuint32m2_t vs2, + size_t vl) { + return __riscv_vaesdm_vs_tu(vd, vs2, vl); +} + +vuint32m8_t test_vaesdm_vs_u32m2_u32m8_tu(vuint32m8_t vd, vuint32m2_t vs2, + size_t vl) { + return __riscv_vaesdm_vs_tu(vd, vs2, vl); +} + +vuint32m4_t test_vaesdm_vv_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, + size_t vl) { + return __riscv_vaesdm_vv_tu(vd, vs2, vl); +} + +vuint32m4_t test_vaesdm_vs_u32m4_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, + size_t vl) { + return __riscv_vaesdm_vs_tu(vd, vs2, vl); +} + +vuint32m8_t test_vaesdm_vs_u32m4_u32m8_tu(vuint32m8_t vd, vuint32m4_t vs2, + size_t vl) { + return __riscv_vaesdm_vs_tu(vd, vs2, vl); +} + +vuint32m8_t test_vaesdm_vv_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, + size_t vl) { + return __riscv_vaesdm_vv_tu(vd, vs2, vl); +} diff --git a/auto-generated/vector-crypto/policy_funcs/overloaded-api-testing/vaesef.c b/auto-generated/vector-crypto/policy_funcs/overloaded-api-testing/vaesef.c new file mode 100644 index 000000000..bd17e9ddc --- /dev/null +++ b/auto-generated/vector-crypto/policy_funcs/overloaded-api-testing/vaesef.c @@ -0,0 +1,97 @@ +#include +#include + +vuint32mf2_t test_vaesef_vv_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, + size_t vl) { + return __riscv_vaesef_vv_tu(vd, vs2, vl); +} + +vuint32mf2_t test_vaesef_vs_u32mf2_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, + size_t vl) { + return __riscv_vaesef_vs_tu(vd, vs2, vl); +} + +vuint32m1_t test_vaesef_vs_u32mf2_u32m1_tu(vuint32m1_t vd, vuint32mf2_t vs2, + size_t vl) { + return __riscv_vaesef_vs_tu(vd, vs2, vl); +} + +vuint32m2_t test_vaesef_vs_u32mf2_u32m2_tu(vuint32m2_t vd, vuint32mf2_t vs2, + size_t vl) { + return __riscv_vaesef_vs_tu(vd, vs2, vl); +} + +vuint32m4_t test_vaesef_vs_u32mf2_u32m4_tu(vuint32m4_t vd, vuint32mf2_t vs2, + size_t vl) { + return __riscv_vaesef_vs_tu(vd, vs2, vl); +} + 
+vuint32m8_t test_vaesef_vs_u32mf2_u32m8_tu(vuint32m8_t vd, vuint32mf2_t vs2, + size_t vl) { + return __riscv_vaesef_vs_tu(vd, vs2, vl); +} + +vuint32m1_t test_vaesef_vv_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, + size_t vl) { + return __riscv_vaesef_vv_tu(vd, vs2, vl); +} + +vuint32m1_t test_vaesef_vs_u32m1_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, + size_t vl) { + return __riscv_vaesef_vs_tu(vd, vs2, vl); +} + +vuint32m2_t test_vaesef_vs_u32m1_u32m2_tu(vuint32m2_t vd, vuint32m1_t vs2, + size_t vl) { + return __riscv_vaesef_vs_tu(vd, vs2, vl); +} + +vuint32m4_t test_vaesef_vs_u32m1_u32m4_tu(vuint32m4_t vd, vuint32m1_t vs2, + size_t vl) { + return __riscv_vaesef_vs_tu(vd, vs2, vl); +} + +vuint32m8_t test_vaesef_vs_u32m1_u32m8_tu(vuint32m8_t vd, vuint32m1_t vs2, + size_t vl) { + return __riscv_vaesef_vs_tu(vd, vs2, vl); +} + +vuint32m2_t test_vaesef_vv_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, + size_t vl) { + return __riscv_vaesef_vv_tu(vd, vs2, vl); +} + +vuint32m2_t test_vaesef_vs_u32m2_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, + size_t vl) { + return __riscv_vaesef_vs_tu(vd, vs2, vl); +} + +vuint32m4_t test_vaesef_vs_u32m2_u32m4_tu(vuint32m4_t vd, vuint32m2_t vs2, + size_t vl) { + return __riscv_vaesef_vs_tu(vd, vs2, vl); +} + +vuint32m8_t test_vaesef_vs_u32m2_u32m8_tu(vuint32m8_t vd, vuint32m2_t vs2, + size_t vl) { + return __riscv_vaesef_vs_tu(vd, vs2, vl); +} + +vuint32m4_t test_vaesef_vv_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, + size_t vl) { + return __riscv_vaesef_vv_tu(vd, vs2, vl); +} + +vuint32m4_t test_vaesef_vs_u32m4_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, + size_t vl) { + return __riscv_vaesef_vs_tu(vd, vs2, vl); +} + +vuint32m8_t test_vaesef_vs_u32m4_u32m8_tu(vuint32m8_t vd, vuint32m4_t vs2, + size_t vl) { + return __riscv_vaesef_vs_tu(vd, vs2, vl); +} + +vuint32m8_t test_vaesef_vv_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, + size_t vl) { + return __riscv_vaesef_vv_tu(vd, vs2, vl); +} diff --git a/auto-generated/vector-crypto/policy_funcs/overloaded-api-testing/vaesem.c b/auto-generated/vector-crypto/policy_funcs/overloaded-api-testing/vaesem.c new file mode 100644 index 000000000..fdbb66b41 --- /dev/null +++ b/auto-generated/vector-crypto/policy_funcs/overloaded-api-testing/vaesem.c @@ -0,0 +1,97 @@ +#include +#include + +vuint32mf2_t test_vaesem_vv_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, + size_t vl) { + return __riscv_vaesem_vv_tu(vd, vs2, vl); +} + +vuint32mf2_t test_vaesem_vs_u32mf2_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, + size_t vl) { + return __riscv_vaesem_vs_tu(vd, vs2, vl); +} + +vuint32m1_t test_vaesem_vs_u32mf2_u32m1_tu(vuint32m1_t vd, vuint32mf2_t vs2, + size_t vl) { + return __riscv_vaesem_vs_tu(vd, vs2, vl); +} + +vuint32m2_t test_vaesem_vs_u32mf2_u32m2_tu(vuint32m2_t vd, vuint32mf2_t vs2, + size_t vl) { + return __riscv_vaesem_vs_tu(vd, vs2, vl); +} + +vuint32m4_t test_vaesem_vs_u32mf2_u32m4_tu(vuint32m4_t vd, vuint32mf2_t vs2, + size_t vl) { + return __riscv_vaesem_vs_tu(vd, vs2, vl); +} + +vuint32m8_t test_vaesem_vs_u32mf2_u32m8_tu(vuint32m8_t vd, vuint32mf2_t vs2, + size_t vl) { + return __riscv_vaesem_vs_tu(vd, vs2, vl); +} + +vuint32m1_t test_vaesem_vv_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, + size_t vl) { + return __riscv_vaesem_vv_tu(vd, vs2, vl); +} + +vuint32m1_t test_vaesem_vs_u32m1_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, + size_t vl) { + return __riscv_vaesem_vs_tu(vd, vs2, vl); +} + +vuint32m2_t test_vaesem_vs_u32m1_u32m2_tu(vuint32m2_t vd, vuint32m1_t vs2, + size_t vl) { + return __riscv_vaesem_vs_tu(vd, vs2, vl); +} + 
+vuint32m4_t test_vaesem_vs_u32m1_u32m4_tu(vuint32m4_t vd, vuint32m1_t vs2, + size_t vl) { + return __riscv_vaesem_vs_tu(vd, vs2, vl); +} + +vuint32m8_t test_vaesem_vs_u32m1_u32m8_tu(vuint32m8_t vd, vuint32m1_t vs2, + size_t vl) { + return __riscv_vaesem_vs_tu(vd, vs2, vl); +} + +vuint32m2_t test_vaesem_vv_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, + size_t vl) { + return __riscv_vaesem_vv_tu(vd, vs2, vl); +} + +vuint32m2_t test_vaesem_vs_u32m2_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, + size_t vl) { + return __riscv_vaesem_vs_tu(vd, vs2, vl); +} + +vuint32m4_t test_vaesem_vs_u32m2_u32m4_tu(vuint32m4_t vd, vuint32m2_t vs2, + size_t vl) { + return __riscv_vaesem_vs_tu(vd, vs2, vl); +} + +vuint32m8_t test_vaesem_vs_u32m2_u32m8_tu(vuint32m8_t vd, vuint32m2_t vs2, + size_t vl) { + return __riscv_vaesem_vs_tu(vd, vs2, vl); +} + +vuint32m4_t test_vaesem_vv_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, + size_t vl) { + return __riscv_vaesem_vv_tu(vd, vs2, vl); +} + +vuint32m4_t test_vaesem_vs_u32m4_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, + size_t vl) { + return __riscv_vaesem_vs_tu(vd, vs2, vl); +} + +vuint32m8_t test_vaesem_vs_u32m4_u32m8_tu(vuint32m8_t vd, vuint32m4_t vs2, + size_t vl) { + return __riscv_vaesem_vs_tu(vd, vs2, vl); +} + +vuint32m8_t test_vaesem_vv_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, + size_t vl) { + return __riscv_vaesem_vv_tu(vd, vs2, vl); +} diff --git a/auto-generated/vector-crypto/policy_funcs/overloaded-api-testing/vaeskf1.c b/auto-generated/vector-crypto/policy_funcs/overloaded-api-testing/vaeskf1.c new file mode 100644 index 000000000..8f11194f5 --- /dev/null +++ b/auto-generated/vector-crypto/policy_funcs/overloaded-api-testing/vaeskf1.c @@ -0,0 +1,27 @@ +#include +#include + +vuint32mf2_t test_vaeskf1_vi_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, + size_t vl) { + return __riscv_vaeskf1_tu(vd, vs2, 0, vl); +} + +vuint32m1_t test_vaeskf1_vi_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, + size_t vl) { + return __riscv_vaeskf1_tu(vd, vs2, 0, vl); +} + +vuint32m2_t test_vaeskf1_vi_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, + size_t vl) { + return __riscv_vaeskf1_tu(vd, vs2, 0, vl); +} + +vuint32m4_t test_vaeskf1_vi_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, + size_t vl) { + return __riscv_vaeskf1_tu(vd, vs2, 0, vl); +} + +vuint32m8_t test_vaeskf1_vi_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, + size_t vl) { + return __riscv_vaeskf1_tu(vd, vs2, 0, vl); +} diff --git a/auto-generated/vector-crypto/policy_funcs/overloaded-api-testing/vaeskf2.c b/auto-generated/vector-crypto/policy_funcs/overloaded-api-testing/vaeskf2.c new file mode 100644 index 000000000..2f71a4d13 --- /dev/null +++ b/auto-generated/vector-crypto/policy_funcs/overloaded-api-testing/vaeskf2.c @@ -0,0 +1,27 @@ +#include +#include + +vuint32mf2_t test_vaeskf2_vi_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, + size_t vl) { + return __riscv_vaeskf2_tu(vd, vs2, 0, vl); +} + +vuint32m1_t test_vaeskf2_vi_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, + size_t vl) { + return __riscv_vaeskf2_tu(vd, vs2, 0, vl); +} + +vuint32m2_t test_vaeskf2_vi_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, + size_t vl) { + return __riscv_vaeskf2_tu(vd, vs2, 0, vl); +} + +vuint32m4_t test_vaeskf2_vi_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, + size_t vl) { + return __riscv_vaeskf2_tu(vd, vs2, 0, vl); +} + +vuint32m8_t test_vaeskf2_vi_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, + size_t vl) { + return __riscv_vaeskf2_tu(vd, vs2, 0, vl); +} diff --git a/auto-generated/vector-crypto/policy_funcs/overloaded-api-testing/vaesz.c 
b/auto-generated/vector-crypto/policy_funcs/overloaded-api-testing/vaesz.c new file mode 100644 index 000000000..6687ccb2c --- /dev/null +++ b/auto-generated/vector-crypto/policy_funcs/overloaded-api-testing/vaesz.c @@ -0,0 +1,72 @@ +#include +#include + +vuint32mf2_t test_vaesz_vs_u32mf2_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, + size_t vl) { + return __riscv_vaesz_tu(vd, vs2, vl); +} + +vuint32m1_t test_vaesz_vs_u32mf2_u32m1_tu(vuint32m1_t vd, vuint32mf2_t vs2, + size_t vl) { + return __riscv_vaesz_tu(vd, vs2, vl); +} + +vuint32m2_t test_vaesz_vs_u32mf2_u32m2_tu(vuint32m2_t vd, vuint32mf2_t vs2, + size_t vl) { + return __riscv_vaesz_tu(vd, vs2, vl); +} + +vuint32m4_t test_vaesz_vs_u32mf2_u32m4_tu(vuint32m4_t vd, vuint32mf2_t vs2, + size_t vl) { + return __riscv_vaesz_tu(vd, vs2, vl); +} + +vuint32m8_t test_vaesz_vs_u32mf2_u32m8_tu(vuint32m8_t vd, vuint32mf2_t vs2, + size_t vl) { + return __riscv_vaesz_tu(vd, vs2, vl); +} + +vuint32m1_t test_vaesz_vs_u32m1_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, + size_t vl) { + return __riscv_vaesz_tu(vd, vs2, vl); +} + +vuint32m2_t test_vaesz_vs_u32m1_u32m2_tu(vuint32m2_t vd, vuint32m1_t vs2, + size_t vl) { + return __riscv_vaesz_tu(vd, vs2, vl); +} + +vuint32m4_t test_vaesz_vs_u32m1_u32m4_tu(vuint32m4_t vd, vuint32m1_t vs2, + size_t vl) { + return __riscv_vaesz_tu(vd, vs2, vl); +} + +vuint32m8_t test_vaesz_vs_u32m1_u32m8_tu(vuint32m8_t vd, vuint32m1_t vs2, + size_t vl) { + return __riscv_vaesz_tu(vd, vs2, vl); +} + +vuint32m2_t test_vaesz_vs_u32m2_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, + size_t vl) { + return __riscv_vaesz_tu(vd, vs2, vl); +} + +vuint32m4_t test_vaesz_vs_u32m2_u32m4_tu(vuint32m4_t vd, vuint32m2_t vs2, + size_t vl) { + return __riscv_vaesz_tu(vd, vs2, vl); +} + +vuint32m8_t test_vaesz_vs_u32m2_u32m8_tu(vuint32m8_t vd, vuint32m2_t vs2, + size_t vl) { + return __riscv_vaesz_tu(vd, vs2, vl); +} + +vuint32m4_t test_vaesz_vs_u32m4_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, + size_t vl) { + return __riscv_vaesz_tu(vd, vs2, vl); +} + +vuint32m8_t test_vaesz_vs_u32m4_u32m8_tu(vuint32m8_t vd, vuint32m4_t vs2, + size_t vl) { + return __riscv_vaesz_tu(vd, vs2, vl); +} diff --git a/auto-generated/vector-crypto/policy_funcs/overloaded-api-testing/vandn.c b/auto-generated/vector-crypto/policy_funcs/overloaded-api-testing/vandn.c new file mode 100644 index 000000000..73315e18a --- /dev/null +++ b/auto-generated/vector-crypto/policy_funcs/overloaded-api-testing/vandn.c @@ -0,0 +1,939 @@ +#include +#include + +vuint8mf8_t test_vandn_vv_u8mf8_tu(vuint8mf8_t vd, vuint8mf8_t vs2, + vuint8mf8_t vs1, size_t vl) { + return __riscv_vandn_tu(vd, vs2, vs1, vl); +} + +vuint8mf8_t test_vandn_vx_u8mf8_tu(vuint8mf8_t vd, vuint8mf8_t vs2, uint8_t rs1, + size_t vl) { + return __riscv_vandn_tu(vd, vs2, rs1, vl); +} + +vuint8mf4_t test_vandn_vv_u8mf4_tu(vuint8mf4_t vd, vuint8mf4_t vs2, + vuint8mf4_t vs1, size_t vl) { + return __riscv_vandn_tu(vd, vs2, vs1, vl); +} + +vuint8mf4_t test_vandn_vx_u8mf4_tu(vuint8mf4_t vd, vuint8mf4_t vs2, uint8_t rs1, + size_t vl) { + return __riscv_vandn_tu(vd, vs2, rs1, vl); +} + +vuint8mf2_t test_vandn_vv_u8mf2_tu(vuint8mf2_t vd, vuint8mf2_t vs2, + vuint8mf2_t vs1, size_t vl) { + return __riscv_vandn_tu(vd, vs2, vs1, vl); +} + +vuint8mf2_t test_vandn_vx_u8mf2_tu(vuint8mf2_t vd, vuint8mf2_t vs2, uint8_t rs1, + size_t vl) { + return __riscv_vandn_tu(vd, vs2, rs1, vl); +} + +vuint8m1_t test_vandn_vv_u8m1_tu(vuint8m1_t vd, vuint8m1_t vs2, vuint8m1_t vs1, + size_t vl) { + return __riscv_vandn_tu(vd, vs2, vs1, vl); +} + +vuint8m1_t 
test_vandn_vx_u8m1_tu(vuint8m1_t vd, vuint8m1_t vs2, uint8_t rs1, + size_t vl) { + return __riscv_vandn_tu(vd, vs2, rs1, vl); +} + +vuint8m2_t test_vandn_vv_u8m2_tu(vuint8m2_t vd, vuint8m2_t vs2, vuint8m2_t vs1, + size_t vl) { + return __riscv_vandn_tu(vd, vs2, vs1, vl); +} + +vuint8m2_t test_vandn_vx_u8m2_tu(vuint8m2_t vd, vuint8m2_t vs2, uint8_t rs1, + size_t vl) { + return __riscv_vandn_tu(vd, vs2, rs1, vl); +} + +vuint8m4_t test_vandn_vv_u8m4_tu(vuint8m4_t vd, vuint8m4_t vs2, vuint8m4_t vs1, + size_t vl) { + return __riscv_vandn_tu(vd, vs2, vs1, vl); +} + +vuint8m4_t test_vandn_vx_u8m4_tu(vuint8m4_t vd, vuint8m4_t vs2, uint8_t rs1, + size_t vl) { + return __riscv_vandn_tu(vd, vs2, rs1, vl); +} + +vuint8m8_t test_vandn_vv_u8m8_tu(vuint8m8_t vd, vuint8m8_t vs2, vuint8m8_t vs1, + size_t vl) { + return __riscv_vandn_tu(vd, vs2, vs1, vl); +} + +vuint8m8_t test_vandn_vx_u8m8_tu(vuint8m8_t vd, vuint8m8_t vs2, uint8_t rs1, + size_t vl) { + return __riscv_vandn_tu(vd, vs2, rs1, vl); +} + +vuint16mf4_t test_vandn_vv_u16mf4_tu(vuint16mf4_t vd, vuint16mf4_t vs2, + vuint16mf4_t vs1, size_t vl) { + return __riscv_vandn_tu(vd, vs2, vs1, vl); +} + +vuint16mf4_t test_vandn_vx_u16mf4_tu(vuint16mf4_t vd, vuint16mf4_t vs2, + uint16_t rs1, size_t vl) { + return __riscv_vandn_tu(vd, vs2, rs1, vl); +} + +vuint16mf2_t test_vandn_vv_u16mf2_tu(vuint16mf2_t vd, vuint16mf2_t vs2, + vuint16mf2_t vs1, size_t vl) { + return __riscv_vandn_tu(vd, vs2, vs1, vl); +} + +vuint16mf2_t test_vandn_vx_u16mf2_tu(vuint16mf2_t vd, vuint16mf2_t vs2, + uint16_t rs1, size_t vl) { + return __riscv_vandn_tu(vd, vs2, rs1, vl); +} + +vuint16m1_t test_vandn_vv_u16m1_tu(vuint16m1_t vd, vuint16m1_t vs2, + vuint16m1_t vs1, size_t vl) { + return __riscv_vandn_tu(vd, vs2, vs1, vl); +} + +vuint16m1_t test_vandn_vx_u16m1_tu(vuint16m1_t vd, vuint16m1_t vs2, + uint16_t rs1, size_t vl) { + return __riscv_vandn_tu(vd, vs2, rs1, vl); +} + +vuint16m2_t test_vandn_vv_u16m2_tu(vuint16m2_t vd, vuint16m2_t vs2, + vuint16m2_t vs1, size_t vl) { + return __riscv_vandn_tu(vd, vs2, vs1, vl); +} + +vuint16m2_t test_vandn_vx_u16m2_tu(vuint16m2_t vd, vuint16m2_t vs2, + uint16_t rs1, size_t vl) { + return __riscv_vandn_tu(vd, vs2, rs1, vl); +} + +vuint16m4_t test_vandn_vv_u16m4_tu(vuint16m4_t vd, vuint16m4_t vs2, + vuint16m4_t vs1, size_t vl) { + return __riscv_vandn_tu(vd, vs2, vs1, vl); +} + +vuint16m4_t test_vandn_vx_u16m4_tu(vuint16m4_t vd, vuint16m4_t vs2, + uint16_t rs1, size_t vl) { + return __riscv_vandn_tu(vd, vs2, rs1, vl); +} + +vuint16m8_t test_vandn_vv_u16m8_tu(vuint16m8_t vd, vuint16m8_t vs2, + vuint16m8_t vs1, size_t vl) { + return __riscv_vandn_tu(vd, vs2, vs1, vl); +} + +vuint16m8_t test_vandn_vx_u16m8_tu(vuint16m8_t vd, vuint16m8_t vs2, + uint16_t rs1, size_t vl) { + return __riscv_vandn_tu(vd, vs2, rs1, vl); +} + +vuint32mf2_t test_vandn_vv_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, + vuint32mf2_t vs1, size_t vl) { + return __riscv_vandn_tu(vd, vs2, vs1, vl); +} + +vuint32mf2_t test_vandn_vx_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, + uint32_t rs1, size_t vl) { + return __riscv_vandn_tu(vd, vs2, rs1, vl); +} + +vuint32m1_t test_vandn_vv_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, + vuint32m1_t vs1, size_t vl) { + return __riscv_vandn_tu(vd, vs2, vs1, vl); +} + +vuint32m1_t test_vandn_vx_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, + uint32_t rs1, size_t vl) { + return __riscv_vandn_tu(vd, vs2, rs1, vl); +} + +vuint32m2_t test_vandn_vv_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, + vuint32m2_t vs1, size_t vl) { + return __riscv_vandn_tu(vd, vs2, 
vs1, vl); +} + +vuint32m2_t test_vandn_vx_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, + uint32_t rs1, size_t vl) { + return __riscv_vandn_tu(vd, vs2, rs1, vl); +} + +vuint32m4_t test_vandn_vv_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, + vuint32m4_t vs1, size_t vl) { + return __riscv_vandn_tu(vd, vs2, vs1, vl); +} + +vuint32m4_t test_vandn_vx_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, + uint32_t rs1, size_t vl) { + return __riscv_vandn_tu(vd, vs2, rs1, vl); +} + +vuint32m8_t test_vandn_vv_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, + vuint32m8_t vs1, size_t vl) { + return __riscv_vandn_tu(vd, vs2, vs1, vl); +} + +vuint32m8_t test_vandn_vx_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, + uint32_t rs1, size_t vl) { + return __riscv_vandn_tu(vd, vs2, rs1, vl); +} + +vuint64m1_t test_vandn_vv_u64m1_tu(vuint64m1_t vd, vuint64m1_t vs2, + vuint64m1_t vs1, size_t vl) { + return __riscv_vandn_tu(vd, vs2, vs1, vl); +} + +vuint64m1_t test_vandn_vx_u64m1_tu(vuint64m1_t vd, vuint64m1_t vs2, + uint64_t rs1, size_t vl) { + return __riscv_vandn_tu(vd, vs2, rs1, vl); +} + +vuint64m2_t test_vandn_vv_u64m2_tu(vuint64m2_t vd, vuint64m2_t vs2, + vuint64m2_t vs1, size_t vl) { + return __riscv_vandn_tu(vd, vs2, vs1, vl); +} + +vuint64m2_t test_vandn_vx_u64m2_tu(vuint64m2_t vd, vuint64m2_t vs2, + uint64_t rs1, size_t vl) { + return __riscv_vandn_tu(vd, vs2, rs1, vl); +} + +vuint64m4_t test_vandn_vv_u64m4_tu(vuint64m4_t vd, vuint64m4_t vs2, + vuint64m4_t vs1, size_t vl) { + return __riscv_vandn_tu(vd, vs2, vs1, vl); +} + +vuint64m4_t test_vandn_vx_u64m4_tu(vuint64m4_t vd, vuint64m4_t vs2, + uint64_t rs1, size_t vl) { + return __riscv_vandn_tu(vd, vs2, rs1, vl); +} + +vuint64m8_t test_vandn_vv_u64m8_tu(vuint64m8_t vd, vuint64m8_t vs2, + vuint64m8_t vs1, size_t vl) { + return __riscv_vandn_tu(vd, vs2, vs1, vl); +} + +vuint64m8_t test_vandn_vx_u64m8_tu(vuint64m8_t vd, vuint64m8_t vs2, + uint64_t rs1, size_t vl) { + return __riscv_vandn_tu(vd, vs2, rs1, vl); +} + +vuint8mf8_t test_vandn_vv_u8mf8_tum(vbool64_t vm, vuint8mf8_t vd, + vuint8mf8_t vs2, vuint8mf8_t vs1, + size_t vl) { + return __riscv_vandn_tum(vm, vd, vs2, vs1, vl); +} + +vuint8mf8_t test_vandn_vx_u8mf8_tum(vbool64_t vm, vuint8mf8_t vd, + vuint8mf8_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vandn_tum(vm, vd, vs2, rs1, vl); +} + +vuint8mf4_t test_vandn_vv_u8mf4_tum(vbool32_t vm, vuint8mf4_t vd, + vuint8mf4_t vs2, vuint8mf4_t vs1, + size_t vl) { + return __riscv_vandn_tum(vm, vd, vs2, vs1, vl); +} + +vuint8mf4_t test_vandn_vx_u8mf4_tum(vbool32_t vm, vuint8mf4_t vd, + vuint8mf4_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vandn_tum(vm, vd, vs2, rs1, vl); +} + +vuint8mf2_t test_vandn_vv_u8mf2_tum(vbool16_t vm, vuint8mf2_t vd, + vuint8mf2_t vs2, vuint8mf2_t vs1, + size_t vl) { + return __riscv_vandn_tum(vm, vd, vs2, vs1, vl); +} + +vuint8mf2_t test_vandn_vx_u8mf2_tum(vbool16_t vm, vuint8mf2_t vd, + vuint8mf2_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vandn_tum(vm, vd, vs2, rs1, vl); +} + +vuint8m1_t test_vandn_vv_u8m1_tum(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, + vuint8m1_t vs1, size_t vl) { + return __riscv_vandn_tum(vm, vd, vs2, vs1, vl); +} + +vuint8m1_t test_vandn_vx_u8m1_tum(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, + uint8_t rs1, size_t vl) { + return __riscv_vandn_tum(vm, vd, vs2, rs1, vl); +} + +vuint8m2_t test_vandn_vv_u8m2_tum(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, + vuint8m2_t vs1, size_t vl) { + return __riscv_vandn_tum(vm, vd, vs2, vs1, vl); +} + +vuint8m2_t test_vandn_vx_u8m2_tum(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, + 
uint8_t rs1, size_t vl) { + return __riscv_vandn_tum(vm, vd, vs2, rs1, vl); +} + +vuint8m4_t test_vandn_vv_u8m4_tum(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, + vuint8m4_t vs1, size_t vl) { + return __riscv_vandn_tum(vm, vd, vs2, vs1, vl); +} + +vuint8m4_t test_vandn_vx_u8m4_tum(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, + uint8_t rs1, size_t vl) { + return __riscv_vandn_tum(vm, vd, vs2, rs1, vl); +} + +vuint8m8_t test_vandn_vv_u8m8_tum(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, + vuint8m8_t vs1, size_t vl) { + return __riscv_vandn_tum(vm, vd, vs2, vs1, vl); +} + +vuint8m8_t test_vandn_vx_u8m8_tum(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, + uint8_t rs1, size_t vl) { + return __riscv_vandn_tum(vm, vd, vs2, rs1, vl); +} + +vuint16mf4_t test_vandn_vv_u16mf4_tum(vbool64_t vm, vuint16mf4_t vd, + vuint16mf4_t vs2, vuint16mf4_t vs1, + size_t vl) { + return __riscv_vandn_tum(vm, vd, vs2, vs1, vl); +} + +vuint16mf4_t test_vandn_vx_u16mf4_tum(vbool64_t vm, vuint16mf4_t vd, + vuint16mf4_t vs2, uint16_t rs1, + size_t vl) { + return __riscv_vandn_tum(vm, vd, vs2, rs1, vl); +} + +vuint16mf2_t test_vandn_vv_u16mf2_tum(vbool32_t vm, vuint16mf2_t vd, + vuint16mf2_t vs2, vuint16mf2_t vs1, + size_t vl) { + return __riscv_vandn_tum(vm, vd, vs2, vs1, vl); +} + +vuint16mf2_t test_vandn_vx_u16mf2_tum(vbool32_t vm, vuint16mf2_t vd, + vuint16mf2_t vs2, uint16_t rs1, + size_t vl) { + return __riscv_vandn_tum(vm, vd, vs2, rs1, vl); +} + +vuint16m1_t test_vandn_vv_u16m1_tum(vbool16_t vm, vuint16m1_t vd, + vuint16m1_t vs2, vuint16m1_t vs1, + size_t vl) { + return __riscv_vandn_tum(vm, vd, vs2, vs1, vl); +} + +vuint16m1_t test_vandn_vx_u16m1_tum(vbool16_t vm, vuint16m1_t vd, + vuint16m1_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vandn_tum(vm, vd, vs2, rs1, vl); +} + +vuint16m2_t test_vandn_vv_u16m2_tum(vbool8_t vm, vuint16m2_t vd, + vuint16m2_t vs2, vuint16m2_t vs1, + size_t vl) { + return __riscv_vandn_tum(vm, vd, vs2, vs1, vl); +} + +vuint16m2_t test_vandn_vx_u16m2_tum(vbool8_t vm, vuint16m2_t vd, + vuint16m2_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vandn_tum(vm, vd, vs2, rs1, vl); +} + +vuint16m4_t test_vandn_vv_u16m4_tum(vbool4_t vm, vuint16m4_t vd, + vuint16m4_t vs2, vuint16m4_t vs1, + size_t vl) { + return __riscv_vandn_tum(vm, vd, vs2, vs1, vl); +} + +vuint16m4_t test_vandn_vx_u16m4_tum(vbool4_t vm, vuint16m4_t vd, + vuint16m4_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vandn_tum(vm, vd, vs2, rs1, vl); +} + +vuint16m8_t test_vandn_vv_u16m8_tum(vbool2_t vm, vuint16m8_t vd, + vuint16m8_t vs2, vuint16m8_t vs1, + size_t vl) { + return __riscv_vandn_tum(vm, vd, vs2, vs1, vl); +} + +vuint16m8_t test_vandn_vx_u16m8_tum(vbool2_t vm, vuint16m8_t vd, + vuint16m8_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vandn_tum(vm, vd, vs2, rs1, vl); +} + +vuint32mf2_t test_vandn_vv_u32mf2_tum(vbool64_t vm, vuint32mf2_t vd, + vuint32mf2_t vs2, vuint32mf2_t vs1, + size_t vl) { + return __riscv_vandn_tum(vm, vd, vs2, vs1, vl); +} + +vuint32mf2_t test_vandn_vx_u32mf2_tum(vbool64_t vm, vuint32mf2_t vd, + vuint32mf2_t vs2, uint32_t rs1, + size_t vl) { + return __riscv_vandn_tum(vm, vd, vs2, rs1, vl); +} + +vuint32m1_t test_vandn_vv_u32m1_tum(vbool32_t vm, vuint32m1_t vd, + vuint32m1_t vs2, vuint32m1_t vs1, + size_t vl) { + return __riscv_vandn_tum(vm, vd, vs2, vs1, vl); +} + +vuint32m1_t test_vandn_vx_u32m1_tum(vbool32_t vm, vuint32m1_t vd, + vuint32m1_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vandn_tum(vm, vd, vs2, rs1, vl); +} + +vuint32m2_t test_vandn_vv_u32m2_tum(vbool16_t vm, vuint32m2_t 
vd, + vuint32m2_t vs2, vuint32m2_t vs1, + size_t vl) { + return __riscv_vandn_tum(vm, vd, vs2, vs1, vl); +} + +vuint32m2_t test_vandn_vx_u32m2_tum(vbool16_t vm, vuint32m2_t vd, + vuint32m2_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vandn_tum(vm, vd, vs2, rs1, vl); +} + +vuint32m4_t test_vandn_vv_u32m4_tum(vbool8_t vm, vuint32m4_t vd, + vuint32m4_t vs2, vuint32m4_t vs1, + size_t vl) { + return __riscv_vandn_tum(vm, vd, vs2, vs1, vl); +} + +vuint32m4_t test_vandn_vx_u32m4_tum(vbool8_t vm, vuint32m4_t vd, + vuint32m4_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vandn_tum(vm, vd, vs2, rs1, vl); +} + +vuint32m8_t test_vandn_vv_u32m8_tum(vbool4_t vm, vuint32m8_t vd, + vuint32m8_t vs2, vuint32m8_t vs1, + size_t vl) { + return __riscv_vandn_tum(vm, vd, vs2, vs1, vl); +} + +vuint32m8_t test_vandn_vx_u32m8_tum(vbool4_t vm, vuint32m8_t vd, + vuint32m8_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vandn_tum(vm, vd, vs2, rs1, vl); +} + +vuint64m1_t test_vandn_vv_u64m1_tum(vbool64_t vm, vuint64m1_t vd, + vuint64m1_t vs2, vuint64m1_t vs1, + size_t vl) { + return __riscv_vandn_tum(vm, vd, vs2, vs1, vl); +} + +vuint64m1_t test_vandn_vx_u64m1_tum(vbool64_t vm, vuint64m1_t vd, + vuint64m1_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vandn_tum(vm, vd, vs2, rs1, vl); +} + +vuint64m2_t test_vandn_vv_u64m2_tum(vbool32_t vm, vuint64m2_t vd, + vuint64m2_t vs2, vuint64m2_t vs1, + size_t vl) { + return __riscv_vandn_tum(vm, vd, vs2, vs1, vl); +} + +vuint64m2_t test_vandn_vx_u64m2_tum(vbool32_t vm, vuint64m2_t vd, + vuint64m2_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vandn_tum(vm, vd, vs2, rs1, vl); +} + +vuint64m4_t test_vandn_vv_u64m4_tum(vbool16_t vm, vuint64m4_t vd, + vuint64m4_t vs2, vuint64m4_t vs1, + size_t vl) { + return __riscv_vandn_tum(vm, vd, vs2, vs1, vl); +} + +vuint64m4_t test_vandn_vx_u64m4_tum(vbool16_t vm, vuint64m4_t vd, + vuint64m4_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vandn_tum(vm, vd, vs2, rs1, vl); +} + +vuint64m8_t test_vandn_vv_u64m8_tum(vbool8_t vm, vuint64m8_t vd, + vuint64m8_t vs2, vuint64m8_t vs1, + size_t vl) { + return __riscv_vandn_tum(vm, vd, vs2, vs1, vl); +} + +vuint64m8_t test_vandn_vx_u64m8_tum(vbool8_t vm, vuint64m8_t vd, + vuint64m8_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vandn_tum(vm, vd, vs2, rs1, vl); +} + +vuint8mf8_t test_vandn_vv_u8mf8_tumu(vbool64_t vm, vuint8mf8_t vd, + vuint8mf8_t vs2, vuint8mf8_t vs1, + size_t vl) { + return __riscv_vandn_tumu(vm, vd, vs2, vs1, vl); +} + +vuint8mf8_t test_vandn_vx_u8mf8_tumu(vbool64_t vm, vuint8mf8_t vd, + vuint8mf8_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vandn_tumu(vm, vd, vs2, rs1, vl); +} + +vuint8mf4_t test_vandn_vv_u8mf4_tumu(vbool32_t vm, vuint8mf4_t vd, + vuint8mf4_t vs2, vuint8mf4_t vs1, + size_t vl) { + return __riscv_vandn_tumu(vm, vd, vs2, vs1, vl); +} + +vuint8mf4_t test_vandn_vx_u8mf4_tumu(vbool32_t vm, vuint8mf4_t vd, + vuint8mf4_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vandn_tumu(vm, vd, vs2, rs1, vl); +} + +vuint8mf2_t test_vandn_vv_u8mf2_tumu(vbool16_t vm, vuint8mf2_t vd, + vuint8mf2_t vs2, vuint8mf2_t vs1, + size_t vl) { + return __riscv_vandn_tumu(vm, vd, vs2, vs1, vl); +} + +vuint8mf2_t test_vandn_vx_u8mf2_tumu(vbool16_t vm, vuint8mf2_t vd, + vuint8mf2_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vandn_tumu(vm, vd, vs2, rs1, vl); +} + +vuint8m1_t test_vandn_vv_u8m1_tumu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, + vuint8m1_t vs1, size_t vl) { + return __riscv_vandn_tumu(vm, vd, vs2, vs1, vl); +} + +vuint8m1_t 
test_vandn_vx_u8m1_tumu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, + uint8_t rs1, size_t vl) { + return __riscv_vandn_tumu(vm, vd, vs2, rs1, vl); +} + +vuint8m2_t test_vandn_vv_u8m2_tumu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, + vuint8m2_t vs1, size_t vl) { + return __riscv_vandn_tumu(vm, vd, vs2, vs1, vl); +} + +vuint8m2_t test_vandn_vx_u8m2_tumu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, + uint8_t rs1, size_t vl) { + return __riscv_vandn_tumu(vm, vd, vs2, rs1, vl); +} + +vuint8m4_t test_vandn_vv_u8m4_tumu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, + vuint8m4_t vs1, size_t vl) { + return __riscv_vandn_tumu(vm, vd, vs2, vs1, vl); +} + +vuint8m4_t test_vandn_vx_u8m4_tumu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, + uint8_t rs1, size_t vl) { + return __riscv_vandn_tumu(vm, vd, vs2, rs1, vl); +} + +vuint8m8_t test_vandn_vv_u8m8_tumu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, + vuint8m8_t vs1, size_t vl) { + return __riscv_vandn_tumu(vm, vd, vs2, vs1, vl); +} + +vuint8m8_t test_vandn_vx_u8m8_tumu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, + uint8_t rs1, size_t vl) { + return __riscv_vandn_tumu(vm, vd, vs2, rs1, vl); +} + +vuint16mf4_t test_vandn_vv_u16mf4_tumu(vbool64_t vm, vuint16mf4_t vd, + vuint16mf4_t vs2, vuint16mf4_t vs1, + size_t vl) { + return __riscv_vandn_tumu(vm, vd, vs2, vs1, vl); +} + +vuint16mf4_t test_vandn_vx_u16mf4_tumu(vbool64_t vm, vuint16mf4_t vd, + vuint16mf4_t vs2, uint16_t rs1, + size_t vl) { + return __riscv_vandn_tumu(vm, vd, vs2, rs1, vl); +} + +vuint16mf2_t test_vandn_vv_u16mf2_tumu(vbool32_t vm, vuint16mf2_t vd, + vuint16mf2_t vs2, vuint16mf2_t vs1, + size_t vl) { + return __riscv_vandn_tumu(vm, vd, vs2, vs1, vl); +} + +vuint16mf2_t test_vandn_vx_u16mf2_tumu(vbool32_t vm, vuint16mf2_t vd, + vuint16mf2_t vs2, uint16_t rs1, + size_t vl) { + return __riscv_vandn_tumu(vm, vd, vs2, rs1, vl); +} + +vuint16m1_t test_vandn_vv_u16m1_tumu(vbool16_t vm, vuint16m1_t vd, + vuint16m1_t vs2, vuint16m1_t vs1, + size_t vl) { + return __riscv_vandn_tumu(vm, vd, vs2, vs1, vl); +} + +vuint16m1_t test_vandn_vx_u16m1_tumu(vbool16_t vm, vuint16m1_t vd, + vuint16m1_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vandn_tumu(vm, vd, vs2, rs1, vl); +} + +vuint16m2_t test_vandn_vv_u16m2_tumu(vbool8_t vm, vuint16m2_t vd, + vuint16m2_t vs2, vuint16m2_t vs1, + size_t vl) { + return __riscv_vandn_tumu(vm, vd, vs2, vs1, vl); +} + +vuint16m2_t test_vandn_vx_u16m2_tumu(vbool8_t vm, vuint16m2_t vd, + vuint16m2_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vandn_tumu(vm, vd, vs2, rs1, vl); +} + +vuint16m4_t test_vandn_vv_u16m4_tumu(vbool4_t vm, vuint16m4_t vd, + vuint16m4_t vs2, vuint16m4_t vs1, + size_t vl) { + return __riscv_vandn_tumu(vm, vd, vs2, vs1, vl); +} + +vuint16m4_t test_vandn_vx_u16m4_tumu(vbool4_t vm, vuint16m4_t vd, + vuint16m4_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vandn_tumu(vm, vd, vs2, rs1, vl); +} + +vuint16m8_t test_vandn_vv_u16m8_tumu(vbool2_t vm, vuint16m8_t vd, + vuint16m8_t vs2, vuint16m8_t vs1, + size_t vl) { + return __riscv_vandn_tumu(vm, vd, vs2, vs1, vl); +} + +vuint16m8_t test_vandn_vx_u16m8_tumu(vbool2_t vm, vuint16m8_t vd, + vuint16m8_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vandn_tumu(vm, vd, vs2, rs1, vl); +} + +vuint32mf2_t test_vandn_vv_u32mf2_tumu(vbool64_t vm, vuint32mf2_t vd, + vuint32mf2_t vs2, vuint32mf2_t vs1, + size_t vl) { + return __riscv_vandn_tumu(vm, vd, vs2, vs1, vl); +} + +vuint32mf2_t test_vandn_vx_u32mf2_tumu(vbool64_t vm, vuint32mf2_t vd, + vuint32mf2_t vs2, uint32_t rs1, + size_t vl) { + return 
__riscv_vandn_tumu(vm, vd, vs2, rs1, vl); +} + +vuint32m1_t test_vandn_vv_u32m1_tumu(vbool32_t vm, vuint32m1_t vd, + vuint32m1_t vs2, vuint32m1_t vs1, + size_t vl) { + return __riscv_vandn_tumu(vm, vd, vs2, vs1, vl); +} + +vuint32m1_t test_vandn_vx_u32m1_tumu(vbool32_t vm, vuint32m1_t vd, + vuint32m1_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vandn_tumu(vm, vd, vs2, rs1, vl); +} + +vuint32m2_t test_vandn_vv_u32m2_tumu(vbool16_t vm, vuint32m2_t vd, + vuint32m2_t vs2, vuint32m2_t vs1, + size_t vl) { + return __riscv_vandn_tumu(vm, vd, vs2, vs1, vl); +} + +vuint32m2_t test_vandn_vx_u32m2_tumu(vbool16_t vm, vuint32m2_t vd, + vuint32m2_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vandn_tumu(vm, vd, vs2, rs1, vl); +} + +vuint32m4_t test_vandn_vv_u32m4_tumu(vbool8_t vm, vuint32m4_t vd, + vuint32m4_t vs2, vuint32m4_t vs1, + size_t vl) { + return __riscv_vandn_tumu(vm, vd, vs2, vs1, vl); +} + +vuint32m4_t test_vandn_vx_u32m4_tumu(vbool8_t vm, vuint32m4_t vd, + vuint32m4_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vandn_tumu(vm, vd, vs2, rs1, vl); +} + +vuint32m8_t test_vandn_vv_u32m8_tumu(vbool4_t vm, vuint32m8_t vd, + vuint32m8_t vs2, vuint32m8_t vs1, + size_t vl) { + return __riscv_vandn_tumu(vm, vd, vs2, vs1, vl); +} + +vuint32m8_t test_vandn_vx_u32m8_tumu(vbool4_t vm, vuint32m8_t vd, + vuint32m8_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vandn_tumu(vm, vd, vs2, rs1, vl); +} + +vuint64m1_t test_vandn_vv_u64m1_tumu(vbool64_t vm, vuint64m1_t vd, + vuint64m1_t vs2, vuint64m1_t vs1, + size_t vl) { + return __riscv_vandn_tumu(vm, vd, vs2, vs1, vl); +} + +vuint64m1_t test_vandn_vx_u64m1_tumu(vbool64_t vm, vuint64m1_t vd, + vuint64m1_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vandn_tumu(vm, vd, vs2, rs1, vl); +} + +vuint64m2_t test_vandn_vv_u64m2_tumu(vbool32_t vm, vuint64m2_t vd, + vuint64m2_t vs2, vuint64m2_t vs1, + size_t vl) { + return __riscv_vandn_tumu(vm, vd, vs2, vs1, vl); +} + +vuint64m2_t test_vandn_vx_u64m2_tumu(vbool32_t vm, vuint64m2_t vd, + vuint64m2_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vandn_tumu(vm, vd, vs2, rs1, vl); +} + +vuint64m4_t test_vandn_vv_u64m4_tumu(vbool16_t vm, vuint64m4_t vd, + vuint64m4_t vs2, vuint64m4_t vs1, + size_t vl) { + return __riscv_vandn_tumu(vm, vd, vs2, vs1, vl); +} + +vuint64m4_t test_vandn_vx_u64m4_tumu(vbool16_t vm, vuint64m4_t vd, + vuint64m4_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vandn_tumu(vm, vd, vs2, rs1, vl); +} + +vuint64m8_t test_vandn_vv_u64m8_tumu(vbool8_t vm, vuint64m8_t vd, + vuint64m8_t vs2, vuint64m8_t vs1, + size_t vl) { + return __riscv_vandn_tumu(vm, vd, vs2, vs1, vl); +} + +vuint64m8_t test_vandn_vx_u64m8_tumu(vbool8_t vm, vuint64m8_t vd, + vuint64m8_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vandn_tumu(vm, vd, vs2, rs1, vl); +} + +vuint8mf8_t test_vandn_vv_u8mf8_mu(vbool64_t vm, vuint8mf8_t vd, + vuint8mf8_t vs2, vuint8mf8_t vs1, + size_t vl) { + return __riscv_vandn_mu(vm, vd, vs2, vs1, vl); +} + +vuint8mf8_t test_vandn_vx_u8mf8_mu(vbool64_t vm, vuint8mf8_t vd, + vuint8mf8_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vandn_mu(vm, vd, vs2, rs1, vl); +} + +vuint8mf4_t test_vandn_vv_u8mf4_mu(vbool32_t vm, vuint8mf4_t vd, + vuint8mf4_t vs2, vuint8mf4_t vs1, + size_t vl) { + return __riscv_vandn_mu(vm, vd, vs2, vs1, vl); +} + +vuint8mf4_t test_vandn_vx_u8mf4_mu(vbool32_t vm, vuint8mf4_t vd, + vuint8mf4_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vandn_mu(vm, vd, vs2, rs1, vl); +} + +vuint8mf2_t test_vandn_vv_u8mf2_mu(vbool16_t vm, vuint8mf2_t vd, + vuint8mf2_t 
vs2, vuint8mf2_t vs1, + size_t vl) { + return __riscv_vandn_mu(vm, vd, vs2, vs1, vl); +} + +vuint8mf2_t test_vandn_vx_u8mf2_mu(vbool16_t vm, vuint8mf2_t vd, + vuint8mf2_t vs2, uint8_t rs1, size_t vl) { + return __riscv_vandn_mu(vm, vd, vs2, rs1, vl); +} + +vuint8m1_t test_vandn_vv_u8m1_mu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, + vuint8m1_t vs1, size_t vl) { + return __riscv_vandn_mu(vm, vd, vs2, vs1, vl); +} + +vuint8m1_t test_vandn_vx_u8m1_mu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, + uint8_t rs1, size_t vl) { + return __riscv_vandn_mu(vm, vd, vs2, rs1, vl); +} + +vuint8m2_t test_vandn_vv_u8m2_mu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, + vuint8m2_t vs1, size_t vl) { + return __riscv_vandn_mu(vm, vd, vs2, vs1, vl); +} + +vuint8m2_t test_vandn_vx_u8m2_mu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, + uint8_t rs1, size_t vl) { + return __riscv_vandn_mu(vm, vd, vs2, rs1, vl); +} + +vuint8m4_t test_vandn_vv_u8m4_mu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, + vuint8m4_t vs1, size_t vl) { + return __riscv_vandn_mu(vm, vd, vs2, vs1, vl); +} + +vuint8m4_t test_vandn_vx_u8m4_mu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, + uint8_t rs1, size_t vl) { + return __riscv_vandn_mu(vm, vd, vs2, rs1, vl); +} + +vuint8m8_t test_vandn_vv_u8m8_mu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, + vuint8m8_t vs1, size_t vl) { + return __riscv_vandn_mu(vm, vd, vs2, vs1, vl); +} + +vuint8m8_t test_vandn_vx_u8m8_mu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, + uint8_t rs1, size_t vl) { + return __riscv_vandn_mu(vm, vd, vs2, rs1, vl); +} + +vuint16mf4_t test_vandn_vv_u16mf4_mu(vbool64_t vm, vuint16mf4_t vd, + vuint16mf4_t vs2, vuint16mf4_t vs1, + size_t vl) { + return __riscv_vandn_mu(vm, vd, vs2, vs1, vl); +} + +vuint16mf4_t test_vandn_vx_u16mf4_mu(vbool64_t vm, vuint16mf4_t vd, + vuint16mf4_t vs2, uint16_t rs1, + size_t vl) { + return __riscv_vandn_mu(vm, vd, vs2, rs1, vl); +} + +vuint16mf2_t test_vandn_vv_u16mf2_mu(vbool32_t vm, vuint16mf2_t vd, + vuint16mf2_t vs2, vuint16mf2_t vs1, + size_t vl) { + return __riscv_vandn_mu(vm, vd, vs2, vs1, vl); +} + +vuint16mf2_t test_vandn_vx_u16mf2_mu(vbool32_t vm, vuint16mf2_t vd, + vuint16mf2_t vs2, uint16_t rs1, + size_t vl) { + return __riscv_vandn_mu(vm, vd, vs2, rs1, vl); +} + +vuint16m1_t test_vandn_vv_u16m1_mu(vbool16_t vm, vuint16m1_t vd, + vuint16m1_t vs2, vuint16m1_t vs1, + size_t vl) { + return __riscv_vandn_mu(vm, vd, vs2, vs1, vl); +} + +vuint16m1_t test_vandn_vx_u16m1_mu(vbool16_t vm, vuint16m1_t vd, + vuint16m1_t vs2, uint16_t rs1, size_t vl) { + return __riscv_vandn_mu(vm, vd, vs2, rs1, vl); +} + +vuint16m2_t test_vandn_vv_u16m2_mu(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, + vuint16m2_t vs1, size_t vl) { + return __riscv_vandn_mu(vm, vd, vs2, vs1, vl); +} + +vuint16m2_t test_vandn_vx_u16m2_mu(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, + uint16_t rs1, size_t vl) { + return __riscv_vandn_mu(vm, vd, vs2, rs1, vl); +} + +vuint16m4_t test_vandn_vv_u16m4_mu(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, + vuint16m4_t vs1, size_t vl) { + return __riscv_vandn_mu(vm, vd, vs2, vs1, vl); +} + +vuint16m4_t test_vandn_vx_u16m4_mu(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, + uint16_t rs1, size_t vl) { + return __riscv_vandn_mu(vm, vd, vs2, rs1, vl); +} + +vuint16m8_t test_vandn_vv_u16m8_mu(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, + vuint16m8_t vs1, size_t vl) { + return __riscv_vandn_mu(vm, vd, vs2, vs1, vl); +} + +vuint16m8_t test_vandn_vx_u16m8_mu(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, + uint16_t rs1, size_t vl) { + return 
__riscv_vandn_mu(vm, vd, vs2, rs1, vl); +} + +vuint32mf2_t test_vandn_vv_u32mf2_mu(vbool64_t vm, vuint32mf2_t vd, + vuint32mf2_t vs2, vuint32mf2_t vs1, + size_t vl) { + return __riscv_vandn_mu(vm, vd, vs2, vs1, vl); +} + +vuint32mf2_t test_vandn_vx_u32mf2_mu(vbool64_t vm, vuint32mf2_t vd, + vuint32mf2_t vs2, uint32_t rs1, + size_t vl) { + return __riscv_vandn_mu(vm, vd, vs2, rs1, vl); +} + +vuint32m1_t test_vandn_vv_u32m1_mu(vbool32_t vm, vuint32m1_t vd, + vuint32m1_t vs2, vuint32m1_t vs1, + size_t vl) { + return __riscv_vandn_mu(vm, vd, vs2, vs1, vl); +} + +vuint32m1_t test_vandn_vx_u32m1_mu(vbool32_t vm, vuint32m1_t vd, + vuint32m1_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vandn_mu(vm, vd, vs2, rs1, vl); +} + +vuint32m2_t test_vandn_vv_u32m2_mu(vbool16_t vm, vuint32m2_t vd, + vuint32m2_t vs2, vuint32m2_t vs1, + size_t vl) { + return __riscv_vandn_mu(vm, vd, vs2, vs1, vl); +} + +vuint32m2_t test_vandn_vx_u32m2_mu(vbool16_t vm, vuint32m2_t vd, + vuint32m2_t vs2, uint32_t rs1, size_t vl) { + return __riscv_vandn_mu(vm, vd, vs2, rs1, vl); +} + +vuint32m4_t test_vandn_vv_u32m4_mu(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, + vuint32m4_t vs1, size_t vl) { + return __riscv_vandn_mu(vm, vd, vs2, vs1, vl); +} + +vuint32m4_t test_vandn_vx_u32m4_mu(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, + uint32_t rs1, size_t vl) { + return __riscv_vandn_mu(vm, vd, vs2, rs1, vl); +} + +vuint32m8_t test_vandn_vv_u32m8_mu(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, + vuint32m8_t vs1, size_t vl) { + return __riscv_vandn_mu(vm, vd, vs2, vs1, vl); +} + +vuint32m8_t test_vandn_vx_u32m8_mu(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, + uint32_t rs1, size_t vl) { + return __riscv_vandn_mu(vm, vd, vs2, rs1, vl); +} + +vuint64m1_t test_vandn_vv_u64m1_mu(vbool64_t vm, vuint64m1_t vd, + vuint64m1_t vs2, vuint64m1_t vs1, + size_t vl) { + return __riscv_vandn_mu(vm, vd, vs2, vs1, vl); +} + +vuint64m1_t test_vandn_vx_u64m1_mu(vbool64_t vm, vuint64m1_t vd, + vuint64m1_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vandn_mu(vm, vd, vs2, rs1, vl); +} + +vuint64m2_t test_vandn_vv_u64m2_mu(vbool32_t vm, vuint64m2_t vd, + vuint64m2_t vs2, vuint64m2_t vs1, + size_t vl) { + return __riscv_vandn_mu(vm, vd, vs2, vs1, vl); +} + +vuint64m2_t test_vandn_vx_u64m2_mu(vbool32_t vm, vuint64m2_t vd, + vuint64m2_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vandn_mu(vm, vd, vs2, rs1, vl); +} + +vuint64m4_t test_vandn_vv_u64m4_mu(vbool16_t vm, vuint64m4_t vd, + vuint64m4_t vs2, vuint64m4_t vs1, + size_t vl) { + return __riscv_vandn_mu(vm, vd, vs2, vs1, vl); +} + +vuint64m4_t test_vandn_vx_u64m4_mu(vbool16_t vm, vuint64m4_t vd, + vuint64m4_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vandn_mu(vm, vd, vs2, rs1, vl); +} + +vuint64m8_t test_vandn_vv_u64m8_mu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, + vuint64m8_t vs1, size_t vl) { + return __riscv_vandn_mu(vm, vd, vs2, vs1, vl); +} + +vuint64m8_t test_vandn_vx_u64m8_mu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, + uint64_t rs1, size_t vl) { + return __riscv_vandn_mu(vm, vd, vs2, rs1, vl); +} diff --git a/auto-generated/vector-crypto/policy_funcs/overloaded-api-testing/vbrev.c b/auto-generated/vector-crypto/policy_funcs/overloaded-api-testing/vbrev.c new file mode 100644 index 000000000..b46e2114c --- /dev/null +++ b/auto-generated/vector-crypto/policy_funcs/overloaded-api-testing/vbrev.c @@ -0,0 +1,423 @@ +#include +#include + +vuint8mf8_t test_vbrev_v_u8mf8_tu(vuint8mf8_t vd, vuint8mf8_t vs2, size_t vl) { + return __riscv_vbrev_tu(vd, vs2, vl); +} + 
+vuint8mf4_t test_vbrev_v_u8mf4_tu(vuint8mf4_t vd, vuint8mf4_t vs2, size_t vl) { + return __riscv_vbrev_tu(vd, vs2, vl); +} + +vuint8mf2_t test_vbrev_v_u8mf2_tu(vuint8mf2_t vd, vuint8mf2_t vs2, size_t vl) { + return __riscv_vbrev_tu(vd, vs2, vl); +} + +vuint8m1_t test_vbrev_v_u8m1_tu(vuint8m1_t vd, vuint8m1_t vs2, size_t vl) { + return __riscv_vbrev_tu(vd, vs2, vl); +} + +vuint8m2_t test_vbrev_v_u8m2_tu(vuint8m2_t vd, vuint8m2_t vs2, size_t vl) { + return __riscv_vbrev_tu(vd, vs2, vl); +} + +vuint8m4_t test_vbrev_v_u8m4_tu(vuint8m4_t vd, vuint8m4_t vs2, size_t vl) { + return __riscv_vbrev_tu(vd, vs2, vl); +} + +vuint8m8_t test_vbrev_v_u8m8_tu(vuint8m8_t vd, vuint8m8_t vs2, size_t vl) { + return __riscv_vbrev_tu(vd, vs2, vl); +} + +vuint16mf4_t test_vbrev_v_u16mf4_tu(vuint16mf4_t vd, vuint16mf4_t vs2, + size_t vl) { + return __riscv_vbrev_tu(vd, vs2, vl); +} + +vuint16mf2_t test_vbrev_v_u16mf2_tu(vuint16mf2_t vd, vuint16mf2_t vs2, + size_t vl) { + return __riscv_vbrev_tu(vd, vs2, vl); +} + +vuint16m1_t test_vbrev_v_u16m1_tu(vuint16m1_t vd, vuint16m1_t vs2, size_t vl) { + return __riscv_vbrev_tu(vd, vs2, vl); +} + +vuint16m2_t test_vbrev_v_u16m2_tu(vuint16m2_t vd, vuint16m2_t vs2, size_t vl) { + return __riscv_vbrev_tu(vd, vs2, vl); +} + +vuint16m4_t test_vbrev_v_u16m4_tu(vuint16m4_t vd, vuint16m4_t vs2, size_t vl) { + return __riscv_vbrev_tu(vd, vs2, vl); +} + +vuint16m8_t test_vbrev_v_u16m8_tu(vuint16m8_t vd, vuint16m8_t vs2, size_t vl) { + return __riscv_vbrev_tu(vd, vs2, vl); +} + +vuint32mf2_t test_vbrev_v_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, + size_t vl) { + return __riscv_vbrev_tu(vd, vs2, vl); +} + +vuint32m1_t test_vbrev_v_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vbrev_tu(vd, vs2, vl); +} + +vuint32m2_t test_vbrev_v_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vbrev_tu(vd, vs2, vl); +} + +vuint32m4_t test_vbrev_v_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vbrev_tu(vd, vs2, vl); +} + +vuint32m8_t test_vbrev_v_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { + return __riscv_vbrev_tu(vd, vs2, vl); +} + +vuint64m1_t test_vbrev_v_u64m1_tu(vuint64m1_t vd, vuint64m1_t vs2, size_t vl) { + return __riscv_vbrev_tu(vd, vs2, vl); +} + +vuint64m2_t test_vbrev_v_u64m2_tu(vuint64m2_t vd, vuint64m2_t vs2, size_t vl) { + return __riscv_vbrev_tu(vd, vs2, vl); +} + +vuint64m4_t test_vbrev_v_u64m4_tu(vuint64m4_t vd, vuint64m4_t vs2, size_t vl) { + return __riscv_vbrev_tu(vd, vs2, vl); +} + +vuint64m8_t test_vbrev_v_u64m8_tu(vuint64m8_t vd, vuint64m8_t vs2, size_t vl) { + return __riscv_vbrev_tu(vd, vs2, vl); +} + +vuint8mf8_t test_vbrev_v_u8mf8_tum(vbool64_t vm, vuint8mf8_t vd, + vuint8mf8_t vs2, size_t vl) { + return __riscv_vbrev_tum(vm, vd, vs2, vl); +} + +vuint8mf4_t test_vbrev_v_u8mf4_tum(vbool32_t vm, vuint8mf4_t vd, + vuint8mf4_t vs2, size_t vl) { + return __riscv_vbrev_tum(vm, vd, vs2, vl); +} + +vuint8mf2_t test_vbrev_v_u8mf2_tum(vbool16_t vm, vuint8mf2_t vd, + vuint8mf2_t vs2, size_t vl) { + return __riscv_vbrev_tum(vm, vd, vs2, vl); +} + +vuint8m1_t test_vbrev_v_u8m1_tum(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, + size_t vl) { + return __riscv_vbrev_tum(vm, vd, vs2, vl); +} + +vuint8m2_t test_vbrev_v_u8m2_tum(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, + size_t vl) { + return __riscv_vbrev_tum(vm, vd, vs2, vl); +} + +vuint8m4_t test_vbrev_v_u8m4_tum(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, + size_t vl) { + return __riscv_vbrev_tum(vm, vd, vs2, vl); +} + +vuint8m8_t 
test_vbrev_v_u8m8_tum(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, + size_t vl) { + return __riscv_vbrev_tum(vm, vd, vs2, vl); +} + +vuint16mf4_t test_vbrev_v_u16mf4_tum(vbool64_t vm, vuint16mf4_t vd, + vuint16mf4_t vs2, size_t vl) { + return __riscv_vbrev_tum(vm, vd, vs2, vl); +} + +vuint16mf2_t test_vbrev_v_u16mf2_tum(vbool32_t vm, vuint16mf2_t vd, + vuint16mf2_t vs2, size_t vl) { + return __riscv_vbrev_tum(vm, vd, vs2, vl); +} + +vuint16m1_t test_vbrev_v_u16m1_tum(vbool16_t vm, vuint16m1_t vd, + vuint16m1_t vs2, size_t vl) { + return __riscv_vbrev_tum(vm, vd, vs2, vl); +} + +vuint16m2_t test_vbrev_v_u16m2_tum(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, + size_t vl) { + return __riscv_vbrev_tum(vm, vd, vs2, vl); +} + +vuint16m4_t test_vbrev_v_u16m4_tum(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, + size_t vl) { + return __riscv_vbrev_tum(vm, vd, vs2, vl); +} + +vuint16m8_t test_vbrev_v_u16m8_tum(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, + size_t vl) { + return __riscv_vbrev_tum(vm, vd, vs2, vl); +} + +vuint32mf2_t test_vbrev_v_u32mf2_tum(vbool64_t vm, vuint32mf2_t vd, + vuint32mf2_t vs2, size_t vl) { + return __riscv_vbrev_tum(vm, vd, vs2, vl); +} + +vuint32m1_t test_vbrev_v_u32m1_tum(vbool32_t vm, vuint32m1_t vd, + vuint32m1_t vs2, size_t vl) { + return __riscv_vbrev_tum(vm, vd, vs2, vl); +} + +vuint32m2_t test_vbrev_v_u32m2_tum(vbool16_t vm, vuint32m2_t vd, + vuint32m2_t vs2, size_t vl) { + return __riscv_vbrev_tum(vm, vd, vs2, vl); +} + +vuint32m4_t test_vbrev_v_u32m4_tum(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, + size_t vl) { + return __riscv_vbrev_tum(vm, vd, vs2, vl); +} + +vuint32m8_t test_vbrev_v_u32m8_tum(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, + size_t vl) { + return __riscv_vbrev_tum(vm, vd, vs2, vl); +} + +vuint64m1_t test_vbrev_v_u64m1_tum(vbool64_t vm, vuint64m1_t vd, + vuint64m1_t vs2, size_t vl) { + return __riscv_vbrev_tum(vm, vd, vs2, vl); +} + +vuint64m2_t test_vbrev_v_u64m2_tum(vbool32_t vm, vuint64m2_t vd, + vuint64m2_t vs2, size_t vl) { + return __riscv_vbrev_tum(vm, vd, vs2, vl); +} + +vuint64m4_t test_vbrev_v_u64m4_tum(vbool16_t vm, vuint64m4_t vd, + vuint64m4_t vs2, size_t vl) { + return __riscv_vbrev_tum(vm, vd, vs2, vl); +} + +vuint64m8_t test_vbrev_v_u64m8_tum(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, + size_t vl) { + return __riscv_vbrev_tum(vm, vd, vs2, vl); +} + +vuint8mf8_t test_vbrev_v_u8mf8_tumu(vbool64_t vm, vuint8mf8_t vd, + vuint8mf8_t vs2, size_t vl) { + return __riscv_vbrev_tumu(vm, vd, vs2, vl); +} + +vuint8mf4_t test_vbrev_v_u8mf4_tumu(vbool32_t vm, vuint8mf4_t vd, + vuint8mf4_t vs2, size_t vl) { + return __riscv_vbrev_tumu(vm, vd, vs2, vl); +} + +vuint8mf2_t test_vbrev_v_u8mf2_tumu(vbool16_t vm, vuint8mf2_t vd, + vuint8mf2_t vs2, size_t vl) { + return __riscv_vbrev_tumu(vm, vd, vs2, vl); +} + +vuint8m1_t test_vbrev_v_u8m1_tumu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, + size_t vl) { + return __riscv_vbrev_tumu(vm, vd, vs2, vl); +} + +vuint8m2_t test_vbrev_v_u8m2_tumu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, + size_t vl) { + return __riscv_vbrev_tumu(vm, vd, vs2, vl); +} + +vuint8m4_t test_vbrev_v_u8m4_tumu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, + size_t vl) { + return __riscv_vbrev_tumu(vm, vd, vs2, vl); +} + +vuint8m8_t test_vbrev_v_u8m8_tumu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, + size_t vl) { + return __riscv_vbrev_tumu(vm, vd, vs2, vl); +} + +vuint16mf4_t test_vbrev_v_u16mf4_tumu(vbool64_t vm, vuint16mf4_t vd, + vuint16mf4_t vs2, size_t vl) { + return __riscv_vbrev_tumu(vm, vd, vs2, vl); +} + +vuint16mf2_t 
test_vbrev_v_u16mf2_tumu(vbool32_t vm, vuint16mf2_t vd, + vuint16mf2_t vs2, size_t vl) { + return __riscv_vbrev_tumu(vm, vd, vs2, vl); +} + +vuint16m1_t test_vbrev_v_u16m1_tumu(vbool16_t vm, vuint16m1_t vd, + vuint16m1_t vs2, size_t vl) { + return __riscv_vbrev_tumu(vm, vd, vs2, vl); +} + +vuint16m2_t test_vbrev_v_u16m2_tumu(vbool8_t vm, vuint16m2_t vd, + vuint16m2_t vs2, size_t vl) { + return __riscv_vbrev_tumu(vm, vd, vs2, vl); +} + +vuint16m4_t test_vbrev_v_u16m4_tumu(vbool4_t vm, vuint16m4_t vd, + vuint16m4_t vs2, size_t vl) { + return __riscv_vbrev_tumu(vm, vd, vs2, vl); +} + +vuint16m8_t test_vbrev_v_u16m8_tumu(vbool2_t vm, vuint16m8_t vd, + vuint16m8_t vs2, size_t vl) { + return __riscv_vbrev_tumu(vm, vd, vs2, vl); +} + +vuint32mf2_t test_vbrev_v_u32mf2_tumu(vbool64_t vm, vuint32mf2_t vd, + vuint32mf2_t vs2, size_t vl) { + return __riscv_vbrev_tumu(vm, vd, vs2, vl); +} + +vuint32m1_t test_vbrev_v_u32m1_tumu(vbool32_t vm, vuint32m1_t vd, + vuint32m1_t vs2, size_t vl) { + return __riscv_vbrev_tumu(vm, vd, vs2, vl); +} + +vuint32m2_t test_vbrev_v_u32m2_tumu(vbool16_t vm, vuint32m2_t vd, + vuint32m2_t vs2, size_t vl) { + return __riscv_vbrev_tumu(vm, vd, vs2, vl); +} + +vuint32m4_t test_vbrev_v_u32m4_tumu(vbool8_t vm, vuint32m4_t vd, + vuint32m4_t vs2, size_t vl) { + return __riscv_vbrev_tumu(vm, vd, vs2, vl); +} + +vuint32m8_t test_vbrev_v_u32m8_tumu(vbool4_t vm, vuint32m8_t vd, + vuint32m8_t vs2, size_t vl) { + return __riscv_vbrev_tumu(vm, vd, vs2, vl); +} + +vuint64m1_t test_vbrev_v_u64m1_tumu(vbool64_t vm, vuint64m1_t vd, + vuint64m1_t vs2, size_t vl) { + return __riscv_vbrev_tumu(vm, vd, vs2, vl); +} + +vuint64m2_t test_vbrev_v_u64m2_tumu(vbool32_t vm, vuint64m2_t vd, + vuint64m2_t vs2, size_t vl) { + return __riscv_vbrev_tumu(vm, vd, vs2, vl); +} + +vuint64m4_t test_vbrev_v_u64m4_tumu(vbool16_t vm, vuint64m4_t vd, + vuint64m4_t vs2, size_t vl) { + return __riscv_vbrev_tumu(vm, vd, vs2, vl); +} + +vuint64m8_t test_vbrev_v_u64m8_tumu(vbool8_t vm, vuint64m8_t vd, + vuint64m8_t vs2, size_t vl) { + return __riscv_vbrev_tumu(vm, vd, vs2, vl); +} + +vuint8mf8_t test_vbrev_v_u8mf8_mu(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, + size_t vl) { + return __riscv_vbrev_mu(vm, vd, vs2, vl); +} + +vuint8mf4_t test_vbrev_v_u8mf4_mu(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, + size_t vl) { + return __riscv_vbrev_mu(vm, vd, vs2, vl); +} + +vuint8mf2_t test_vbrev_v_u8mf2_mu(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, + size_t vl) { + return __riscv_vbrev_mu(vm, vd, vs2, vl); +} + +vuint8m1_t test_vbrev_v_u8m1_mu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, + size_t vl) { + return __riscv_vbrev_mu(vm, vd, vs2, vl); +} + +vuint8m2_t test_vbrev_v_u8m2_mu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, + size_t vl) { + return __riscv_vbrev_mu(vm, vd, vs2, vl); +} + +vuint8m4_t test_vbrev_v_u8m4_mu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, + size_t vl) { + return __riscv_vbrev_mu(vm, vd, vs2, vl); +} + +vuint8m8_t test_vbrev_v_u8m8_mu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, + size_t vl) { + return __riscv_vbrev_mu(vm, vd, vs2, vl); +} + +vuint16mf4_t test_vbrev_v_u16mf4_mu(vbool64_t vm, vuint16mf4_t vd, + vuint16mf4_t vs2, size_t vl) { + return __riscv_vbrev_mu(vm, vd, vs2, vl); +} + +vuint16mf2_t test_vbrev_v_u16mf2_mu(vbool32_t vm, vuint16mf2_t vd, + vuint16mf2_t vs2, size_t vl) { + return __riscv_vbrev_mu(vm, vd, vs2, vl); +} + +vuint16m1_t test_vbrev_v_u16m1_mu(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, + size_t vl) { + return __riscv_vbrev_mu(vm, vd, vs2, vl); +} + +vuint16m2_t 
test_vbrev_v_u16m2_mu(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, + size_t vl) { + return __riscv_vbrev_mu(vm, vd, vs2, vl); +} + +vuint16m4_t test_vbrev_v_u16m4_mu(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, + size_t vl) { + return __riscv_vbrev_mu(vm, vd, vs2, vl); +} + +vuint16m8_t test_vbrev_v_u16m8_mu(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, + size_t vl) { + return __riscv_vbrev_mu(vm, vd, vs2, vl); +} + +vuint32mf2_t test_vbrev_v_u32mf2_mu(vbool64_t vm, vuint32mf2_t vd, + vuint32mf2_t vs2, size_t vl) { + return __riscv_vbrev_mu(vm, vd, vs2, vl); +} + +vuint32m1_t test_vbrev_v_u32m1_mu(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, + size_t vl) { + return __riscv_vbrev_mu(vm, vd, vs2, vl); +} + +vuint32m2_t test_vbrev_v_u32m2_mu(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, + size_t vl) { + return __riscv_vbrev_mu(vm, vd, vs2, vl); +} + +vuint32m4_t test_vbrev_v_u32m4_mu(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, + size_t vl) { + return __riscv_vbrev_mu(vm, vd, vs2, vl); +} + +vuint32m8_t test_vbrev_v_u32m8_mu(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, + size_t vl) { + return __riscv_vbrev_mu(vm, vd, vs2, vl); +} + +vuint64m1_t test_vbrev_v_u64m1_mu(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, + size_t vl) { + return __riscv_vbrev_mu(vm, vd, vs2, vl); +} + +vuint64m2_t test_vbrev_v_u64m2_mu(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, + size_t vl) { + return __riscv_vbrev_mu(vm, vd, vs2, vl); +} + +vuint64m4_t test_vbrev_v_u64m4_mu(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, + size_t vl) { + return __riscv_vbrev_mu(vm, vd, vs2, vl); +} + +vuint64m8_t test_vbrev_v_u64m8_mu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, + size_t vl) { + return __riscv_vbrev_mu(vm, vd, vs2, vl); +} diff --git a/auto-generated/vector-crypto/policy_funcs/overloaded-api-testing/vbrev8.c b/auto-generated/vector-crypto/policy_funcs/overloaded-api-testing/vbrev8.c new file mode 100644 index 000000000..03f632695 --- /dev/null +++ b/auto-generated/vector-crypto/policy_funcs/overloaded-api-testing/vbrev8.c @@ -0,0 +1,423 @@ +#include +#include + +vuint8mf8_t test_vbrev8_v_u8mf8_tu(vuint8mf8_t vd, vuint8mf8_t vs2, size_t vl) { + return __riscv_vbrev8_tu(vd, vs2, vl); +} + +vuint8mf4_t test_vbrev8_v_u8mf4_tu(vuint8mf4_t vd, vuint8mf4_t vs2, size_t vl) { + return __riscv_vbrev8_tu(vd, vs2, vl); +} + +vuint8mf2_t test_vbrev8_v_u8mf2_tu(vuint8mf2_t vd, vuint8mf2_t vs2, size_t vl) { + return __riscv_vbrev8_tu(vd, vs2, vl); +} + +vuint8m1_t test_vbrev8_v_u8m1_tu(vuint8m1_t vd, vuint8m1_t vs2, size_t vl) { + return __riscv_vbrev8_tu(vd, vs2, vl); +} + +vuint8m2_t test_vbrev8_v_u8m2_tu(vuint8m2_t vd, vuint8m2_t vs2, size_t vl) { + return __riscv_vbrev8_tu(vd, vs2, vl); +} + +vuint8m4_t test_vbrev8_v_u8m4_tu(vuint8m4_t vd, vuint8m4_t vs2, size_t vl) { + return __riscv_vbrev8_tu(vd, vs2, vl); +} + +vuint8m8_t test_vbrev8_v_u8m8_tu(vuint8m8_t vd, vuint8m8_t vs2, size_t vl) { + return __riscv_vbrev8_tu(vd, vs2, vl); +} + +vuint16mf4_t test_vbrev8_v_u16mf4_tu(vuint16mf4_t vd, vuint16mf4_t vs2, + size_t vl) { + return __riscv_vbrev8_tu(vd, vs2, vl); +} + +vuint16mf2_t test_vbrev8_v_u16mf2_tu(vuint16mf2_t vd, vuint16mf2_t vs2, + size_t vl) { + return __riscv_vbrev8_tu(vd, vs2, vl); +} + +vuint16m1_t test_vbrev8_v_u16m1_tu(vuint16m1_t vd, vuint16m1_t vs2, size_t vl) { + return __riscv_vbrev8_tu(vd, vs2, vl); +} + +vuint16m2_t test_vbrev8_v_u16m2_tu(vuint16m2_t vd, vuint16m2_t vs2, size_t vl) { + return __riscv_vbrev8_tu(vd, vs2, vl); +} + +vuint16m4_t test_vbrev8_v_u16m4_tu(vuint16m4_t vd, vuint16m4_t vs2, 
size_t vl) { + return __riscv_vbrev8_tu(vd, vs2, vl); +} + +vuint16m8_t test_vbrev8_v_u16m8_tu(vuint16m8_t vd, vuint16m8_t vs2, size_t vl) { + return __riscv_vbrev8_tu(vd, vs2, vl); +} + +vuint32mf2_t test_vbrev8_v_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, + size_t vl) { + return __riscv_vbrev8_tu(vd, vs2, vl); +} + +vuint32m1_t test_vbrev8_v_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vbrev8_tu(vd, vs2, vl); +} + +vuint32m2_t test_vbrev8_v_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vbrev8_tu(vd, vs2, vl); +} + +vuint32m4_t test_vbrev8_v_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vbrev8_tu(vd, vs2, vl); +} + +vuint32m8_t test_vbrev8_v_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { + return __riscv_vbrev8_tu(vd, vs2, vl); +} + +vuint64m1_t test_vbrev8_v_u64m1_tu(vuint64m1_t vd, vuint64m1_t vs2, size_t vl) { + return __riscv_vbrev8_tu(vd, vs2, vl); +} + +vuint64m2_t test_vbrev8_v_u64m2_tu(vuint64m2_t vd, vuint64m2_t vs2, size_t vl) { + return __riscv_vbrev8_tu(vd, vs2, vl); +} + +vuint64m4_t test_vbrev8_v_u64m4_tu(vuint64m4_t vd, vuint64m4_t vs2, size_t vl) { + return __riscv_vbrev8_tu(vd, vs2, vl); +} + +vuint64m8_t test_vbrev8_v_u64m8_tu(vuint64m8_t vd, vuint64m8_t vs2, size_t vl) { + return __riscv_vbrev8_tu(vd, vs2, vl); +} + +vuint8mf8_t test_vbrev8_v_u8mf8_tum(vbool64_t vm, vuint8mf8_t vd, + vuint8mf8_t vs2, size_t vl) { + return __riscv_vbrev8_tum(vm, vd, vs2, vl); +} + +vuint8mf4_t test_vbrev8_v_u8mf4_tum(vbool32_t vm, vuint8mf4_t vd, + vuint8mf4_t vs2, size_t vl) { + return __riscv_vbrev8_tum(vm, vd, vs2, vl); +} + +vuint8mf2_t test_vbrev8_v_u8mf2_tum(vbool16_t vm, vuint8mf2_t vd, + vuint8mf2_t vs2, size_t vl) { + return __riscv_vbrev8_tum(vm, vd, vs2, vl); +} + +vuint8m1_t test_vbrev8_v_u8m1_tum(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, + size_t vl) { + return __riscv_vbrev8_tum(vm, vd, vs2, vl); +} + +vuint8m2_t test_vbrev8_v_u8m2_tum(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, + size_t vl) { + return __riscv_vbrev8_tum(vm, vd, vs2, vl); +} + +vuint8m4_t test_vbrev8_v_u8m4_tum(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, + size_t vl) { + return __riscv_vbrev8_tum(vm, vd, vs2, vl); +} + +vuint8m8_t test_vbrev8_v_u8m8_tum(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, + size_t vl) { + return __riscv_vbrev8_tum(vm, vd, vs2, vl); +} + +vuint16mf4_t test_vbrev8_v_u16mf4_tum(vbool64_t vm, vuint16mf4_t vd, + vuint16mf4_t vs2, size_t vl) { + return __riscv_vbrev8_tum(vm, vd, vs2, vl); +} + +vuint16mf2_t test_vbrev8_v_u16mf2_tum(vbool32_t vm, vuint16mf2_t vd, + vuint16mf2_t vs2, size_t vl) { + return __riscv_vbrev8_tum(vm, vd, vs2, vl); +} + +vuint16m1_t test_vbrev8_v_u16m1_tum(vbool16_t vm, vuint16m1_t vd, + vuint16m1_t vs2, size_t vl) { + return __riscv_vbrev8_tum(vm, vd, vs2, vl); +} + +vuint16m2_t test_vbrev8_v_u16m2_tum(vbool8_t vm, vuint16m2_t vd, + vuint16m2_t vs2, size_t vl) { + return __riscv_vbrev8_tum(vm, vd, vs2, vl); +} + +vuint16m4_t test_vbrev8_v_u16m4_tum(vbool4_t vm, vuint16m4_t vd, + vuint16m4_t vs2, size_t vl) { + return __riscv_vbrev8_tum(vm, vd, vs2, vl); +} + +vuint16m8_t test_vbrev8_v_u16m8_tum(vbool2_t vm, vuint16m8_t vd, + vuint16m8_t vs2, size_t vl) { + return __riscv_vbrev8_tum(vm, vd, vs2, vl); +} + +vuint32mf2_t test_vbrev8_v_u32mf2_tum(vbool64_t vm, vuint32mf2_t vd, + vuint32mf2_t vs2, size_t vl) { + return __riscv_vbrev8_tum(vm, vd, vs2, vl); +} + +vuint32m1_t test_vbrev8_v_u32m1_tum(vbool32_t vm, vuint32m1_t vd, + vuint32m1_t vs2, size_t vl) { + return 
__riscv_vbrev8_tum(vm, vd, vs2, vl); +} + +vuint32m2_t test_vbrev8_v_u32m2_tum(vbool16_t vm, vuint32m2_t vd, + vuint32m2_t vs2, size_t vl) { + return __riscv_vbrev8_tum(vm, vd, vs2, vl); +} + +vuint32m4_t test_vbrev8_v_u32m4_tum(vbool8_t vm, vuint32m4_t vd, + vuint32m4_t vs2, size_t vl) { + return __riscv_vbrev8_tum(vm, vd, vs2, vl); +} + +vuint32m8_t test_vbrev8_v_u32m8_tum(vbool4_t vm, vuint32m8_t vd, + vuint32m8_t vs2, size_t vl) { + return __riscv_vbrev8_tum(vm, vd, vs2, vl); +} + +vuint64m1_t test_vbrev8_v_u64m1_tum(vbool64_t vm, vuint64m1_t vd, + vuint64m1_t vs2, size_t vl) { + return __riscv_vbrev8_tum(vm, vd, vs2, vl); +} + +vuint64m2_t test_vbrev8_v_u64m2_tum(vbool32_t vm, vuint64m2_t vd, + vuint64m2_t vs2, size_t vl) { + return __riscv_vbrev8_tum(vm, vd, vs2, vl); +} + +vuint64m4_t test_vbrev8_v_u64m4_tum(vbool16_t vm, vuint64m4_t vd, + vuint64m4_t vs2, size_t vl) { + return __riscv_vbrev8_tum(vm, vd, vs2, vl); +} + +vuint64m8_t test_vbrev8_v_u64m8_tum(vbool8_t vm, vuint64m8_t vd, + vuint64m8_t vs2, size_t vl) { + return __riscv_vbrev8_tum(vm, vd, vs2, vl); +} + +vuint8mf8_t test_vbrev8_v_u8mf8_tumu(vbool64_t vm, vuint8mf8_t vd, + vuint8mf8_t vs2, size_t vl) { + return __riscv_vbrev8_tumu(vm, vd, vs2, vl); +} + +vuint8mf4_t test_vbrev8_v_u8mf4_tumu(vbool32_t vm, vuint8mf4_t vd, + vuint8mf4_t vs2, size_t vl) { + return __riscv_vbrev8_tumu(vm, vd, vs2, vl); +} + +vuint8mf2_t test_vbrev8_v_u8mf2_tumu(vbool16_t vm, vuint8mf2_t vd, + vuint8mf2_t vs2, size_t vl) { + return __riscv_vbrev8_tumu(vm, vd, vs2, vl); +} + +vuint8m1_t test_vbrev8_v_u8m1_tumu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, + size_t vl) { + return __riscv_vbrev8_tumu(vm, vd, vs2, vl); +} + +vuint8m2_t test_vbrev8_v_u8m2_tumu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, + size_t vl) { + return __riscv_vbrev8_tumu(vm, vd, vs2, vl); +} + +vuint8m4_t test_vbrev8_v_u8m4_tumu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, + size_t vl) { + return __riscv_vbrev8_tumu(vm, vd, vs2, vl); +} + +vuint8m8_t test_vbrev8_v_u8m8_tumu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, + size_t vl) { + return __riscv_vbrev8_tumu(vm, vd, vs2, vl); +} + +vuint16mf4_t test_vbrev8_v_u16mf4_tumu(vbool64_t vm, vuint16mf4_t vd, + vuint16mf4_t vs2, size_t vl) { + return __riscv_vbrev8_tumu(vm, vd, vs2, vl); +} + +vuint16mf2_t test_vbrev8_v_u16mf2_tumu(vbool32_t vm, vuint16mf2_t vd, + vuint16mf2_t vs2, size_t vl) { + return __riscv_vbrev8_tumu(vm, vd, vs2, vl); +} + +vuint16m1_t test_vbrev8_v_u16m1_tumu(vbool16_t vm, vuint16m1_t vd, + vuint16m1_t vs2, size_t vl) { + return __riscv_vbrev8_tumu(vm, vd, vs2, vl); +} + +vuint16m2_t test_vbrev8_v_u16m2_tumu(vbool8_t vm, vuint16m2_t vd, + vuint16m2_t vs2, size_t vl) { + return __riscv_vbrev8_tumu(vm, vd, vs2, vl); +} + +vuint16m4_t test_vbrev8_v_u16m4_tumu(vbool4_t vm, vuint16m4_t vd, + vuint16m4_t vs2, size_t vl) { + return __riscv_vbrev8_tumu(vm, vd, vs2, vl); +} + +vuint16m8_t test_vbrev8_v_u16m8_tumu(vbool2_t vm, vuint16m8_t vd, + vuint16m8_t vs2, size_t vl) { + return __riscv_vbrev8_tumu(vm, vd, vs2, vl); +} + +vuint32mf2_t test_vbrev8_v_u32mf2_tumu(vbool64_t vm, vuint32mf2_t vd, + vuint32mf2_t vs2, size_t vl) { + return __riscv_vbrev8_tumu(vm, vd, vs2, vl); +} + +vuint32m1_t test_vbrev8_v_u32m1_tumu(vbool32_t vm, vuint32m1_t vd, + vuint32m1_t vs2, size_t vl) { + return __riscv_vbrev8_tumu(vm, vd, vs2, vl); +} + +vuint32m2_t test_vbrev8_v_u32m2_tumu(vbool16_t vm, vuint32m2_t vd, + vuint32m2_t vs2, size_t vl) { + return __riscv_vbrev8_tumu(vm, vd, vs2, vl); +} + +vuint32m4_t 
test_vbrev8_v_u32m4_tumu(vbool8_t vm, vuint32m4_t vd, + vuint32m4_t vs2, size_t vl) { + return __riscv_vbrev8_tumu(vm, vd, vs2, vl); +} + +vuint32m8_t test_vbrev8_v_u32m8_tumu(vbool4_t vm, vuint32m8_t vd, + vuint32m8_t vs2, size_t vl) { + return __riscv_vbrev8_tumu(vm, vd, vs2, vl); +} + +vuint64m1_t test_vbrev8_v_u64m1_tumu(vbool64_t vm, vuint64m1_t vd, + vuint64m1_t vs2, size_t vl) { + return __riscv_vbrev8_tumu(vm, vd, vs2, vl); +} + +vuint64m2_t test_vbrev8_v_u64m2_tumu(vbool32_t vm, vuint64m2_t vd, + vuint64m2_t vs2, size_t vl) { + return __riscv_vbrev8_tumu(vm, vd, vs2, vl); +} + +vuint64m4_t test_vbrev8_v_u64m4_tumu(vbool16_t vm, vuint64m4_t vd, + vuint64m4_t vs2, size_t vl) { + return __riscv_vbrev8_tumu(vm, vd, vs2, vl); +} + +vuint64m8_t test_vbrev8_v_u64m8_tumu(vbool8_t vm, vuint64m8_t vd, + vuint64m8_t vs2, size_t vl) { + return __riscv_vbrev8_tumu(vm, vd, vs2, vl); +} + +vuint8mf8_t test_vbrev8_v_u8mf8_mu(vbool64_t vm, vuint8mf8_t vd, + vuint8mf8_t vs2, size_t vl) { + return __riscv_vbrev8_mu(vm, vd, vs2, vl); +} + +vuint8mf4_t test_vbrev8_v_u8mf4_mu(vbool32_t vm, vuint8mf4_t vd, + vuint8mf4_t vs2, size_t vl) { + return __riscv_vbrev8_mu(vm, vd, vs2, vl); +} + +vuint8mf2_t test_vbrev8_v_u8mf2_mu(vbool16_t vm, vuint8mf2_t vd, + vuint8mf2_t vs2, size_t vl) { + return __riscv_vbrev8_mu(vm, vd, vs2, vl); +} + +vuint8m1_t test_vbrev8_v_u8m1_mu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, + size_t vl) { + return __riscv_vbrev8_mu(vm, vd, vs2, vl); +} + +vuint8m2_t test_vbrev8_v_u8m2_mu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, + size_t vl) { + return __riscv_vbrev8_mu(vm, vd, vs2, vl); +} + +vuint8m4_t test_vbrev8_v_u8m4_mu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, + size_t vl) { + return __riscv_vbrev8_mu(vm, vd, vs2, vl); +} + +vuint8m8_t test_vbrev8_v_u8m8_mu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, + size_t vl) { + return __riscv_vbrev8_mu(vm, vd, vs2, vl); +} + +vuint16mf4_t test_vbrev8_v_u16mf4_mu(vbool64_t vm, vuint16mf4_t vd, + vuint16mf4_t vs2, size_t vl) { + return __riscv_vbrev8_mu(vm, vd, vs2, vl); +} + +vuint16mf2_t test_vbrev8_v_u16mf2_mu(vbool32_t vm, vuint16mf2_t vd, + vuint16mf2_t vs2, size_t vl) { + return __riscv_vbrev8_mu(vm, vd, vs2, vl); +} + +vuint16m1_t test_vbrev8_v_u16m1_mu(vbool16_t vm, vuint16m1_t vd, + vuint16m1_t vs2, size_t vl) { + return __riscv_vbrev8_mu(vm, vd, vs2, vl); +} + +vuint16m2_t test_vbrev8_v_u16m2_mu(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, + size_t vl) { + return __riscv_vbrev8_mu(vm, vd, vs2, vl); +} + +vuint16m4_t test_vbrev8_v_u16m4_mu(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, + size_t vl) { + return __riscv_vbrev8_mu(vm, vd, vs2, vl); +} + +vuint16m8_t test_vbrev8_v_u16m8_mu(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, + size_t vl) { + return __riscv_vbrev8_mu(vm, vd, vs2, vl); +} + +vuint32mf2_t test_vbrev8_v_u32mf2_mu(vbool64_t vm, vuint32mf2_t vd, + vuint32mf2_t vs2, size_t vl) { + return __riscv_vbrev8_mu(vm, vd, vs2, vl); +} + +vuint32m1_t test_vbrev8_v_u32m1_mu(vbool32_t vm, vuint32m1_t vd, + vuint32m1_t vs2, size_t vl) { + return __riscv_vbrev8_mu(vm, vd, vs2, vl); +} + +vuint32m2_t test_vbrev8_v_u32m2_mu(vbool16_t vm, vuint32m2_t vd, + vuint32m2_t vs2, size_t vl) { + return __riscv_vbrev8_mu(vm, vd, vs2, vl); +} + +vuint32m4_t test_vbrev8_v_u32m4_mu(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, + size_t vl) { + return __riscv_vbrev8_mu(vm, vd, vs2, vl); +} + +vuint32m8_t test_vbrev8_v_u32m8_mu(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, + size_t vl) { + return __riscv_vbrev8_mu(vm, vd, vs2, vl); +} + 
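The `_mu` (mask-undisturbed) tests in this file only check that each prototype resolves to the expected overloaded policy intrinsic. As a rough, hand-written illustration of what the policy does in practice — not part of the auto-generated output, assuming the Zvbb/Zvkb extension plus the standard vsetvl/load/store intrinsics, with the helper name and `sel` mask chosen here for illustration only — the sketch below reverses the bit order within each byte of `src`, but only in the lanes flagged by `sel`; the other processed lanes of `dst` keep their previous values.

#include <riscv_vector.h>
#include <stdint.h>

/* Hypothetical helper (not part of the generated tests): bit-reverse each byte
 * of src[i] only where sel[i] != 0.  With the "_mu" policy, masked-off lanes
 * keep the value already in vd, which was loaded from dst, so those bytes of
 * dst are written back unchanged. */
void brev8_selected_u8(uint8_t *dst, const uint8_t *src, const uint8_t *sel,
                       size_t n) {
  for (size_t i = 0; i < n;) {
    size_t vl = __riscv_vsetvl_e8m1(n - i);
    vuint8m1_t vd  = __riscv_vle8_v_u8m1(dst + i, vl);    /* previous values  */
    vuint8m1_t vs2 = __riscv_vle8_v_u8m1(src + i, vl);    /* source operand   */
    vuint8m1_t vms = __riscv_vle8_v_u8m1(sel + i, vl);
    vbool8_t   vm  = __riscv_vmsne_vx_u8m1_b8(vms, 0, vl); /* sel[i] != 0     */
    vd = __riscv_vbrev8_mu(vm, vd, vs2, vl);  /* same overload as the tests   */
    __riscv_vse8_v_u8m1(dst + i, vd, vl);
    i += vl;
  }
}

Only the surrounding strip-mine loop and the mask computation are added here; the `__riscv_vbrev8_mu` call takes exactly the operands exercised by the generated tests.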
+vuint64m1_t test_vbrev8_v_u64m1_mu(vbool64_t vm, vuint64m1_t vd, + vuint64m1_t vs2, size_t vl) { + return __riscv_vbrev8_mu(vm, vd, vs2, vl); +} + +vuint64m2_t test_vbrev8_v_u64m2_mu(vbool32_t vm, vuint64m2_t vd, + vuint64m2_t vs2, size_t vl) { + return __riscv_vbrev8_mu(vm, vd, vs2, vl); +} + +vuint64m4_t test_vbrev8_v_u64m4_mu(vbool16_t vm, vuint64m4_t vd, + vuint64m4_t vs2, size_t vl) { + return __riscv_vbrev8_mu(vm, vd, vs2, vl); +} + +vuint64m8_t test_vbrev8_v_u64m8_mu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, + size_t vl) { + return __riscv_vbrev8_mu(vm, vd, vs2, vl); +} diff --git a/auto-generated/vector-crypto/policy_funcs/overloaded-api-testing/vclmul.c b/auto-generated/vector-crypto/policy_funcs/overloaded-api-testing/vclmul.c new file mode 100644 index 000000000..488ab2300 --- /dev/null +++ b/auto-generated/vector-crypto/policy_funcs/overloaded-api-testing/vclmul.c @@ -0,0 +1,178 @@ +#include +#include + +vuint64m1_t test_vclmul_vv_u64m1_tu(vuint64m1_t vd, vuint64m1_t vs2, + vuint64m1_t vs1, size_t vl) { + return __riscv_vclmul_tu(vd, vs2, vs1, vl); +} + +vuint64m1_t test_vclmul_vx_u64m1_tu(vuint64m1_t vd, vuint64m1_t vs2, + uint64_t rs1, size_t vl) { + return __riscv_vclmul_tu(vd, vs2, rs1, vl); +} + +vuint64m2_t test_vclmul_vv_u64m2_tu(vuint64m2_t vd, vuint64m2_t vs2, + vuint64m2_t vs1, size_t vl) { + return __riscv_vclmul_tu(vd, vs2, vs1, vl); +} + +vuint64m2_t test_vclmul_vx_u64m2_tu(vuint64m2_t vd, vuint64m2_t vs2, + uint64_t rs1, size_t vl) { + return __riscv_vclmul_tu(vd, vs2, rs1, vl); +} + +vuint64m4_t test_vclmul_vv_u64m4_tu(vuint64m4_t vd, vuint64m4_t vs2, + vuint64m4_t vs1, size_t vl) { + return __riscv_vclmul_tu(vd, vs2, vs1, vl); +} + +vuint64m4_t test_vclmul_vx_u64m4_tu(vuint64m4_t vd, vuint64m4_t vs2, + uint64_t rs1, size_t vl) { + return __riscv_vclmul_tu(vd, vs2, rs1, vl); +} + +vuint64m8_t test_vclmul_vv_u64m8_tu(vuint64m8_t vd, vuint64m8_t vs2, + vuint64m8_t vs1, size_t vl) { + return __riscv_vclmul_tu(vd, vs2, vs1, vl); +} + +vuint64m8_t test_vclmul_vx_u64m8_tu(vuint64m8_t vd, vuint64m8_t vs2, + uint64_t rs1, size_t vl) { + return __riscv_vclmul_tu(vd, vs2, rs1, vl); +} + +vuint64m1_t test_vclmul_vv_u64m1_tum(vbool64_t vm, vuint64m1_t vd, + vuint64m1_t vs2, vuint64m1_t vs1, + size_t vl) { + return __riscv_vclmul_tum(vm, vd, vs2, vs1, vl); +} + +vuint64m1_t test_vclmul_vx_u64m1_tum(vbool64_t vm, vuint64m1_t vd, + vuint64m1_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vclmul_tum(vm, vd, vs2, rs1, vl); +} + +vuint64m2_t test_vclmul_vv_u64m2_tum(vbool32_t vm, vuint64m2_t vd, + vuint64m2_t vs2, vuint64m2_t vs1, + size_t vl) { + return __riscv_vclmul_tum(vm, vd, vs2, vs1, vl); +} + +vuint64m2_t test_vclmul_vx_u64m2_tum(vbool32_t vm, vuint64m2_t vd, + vuint64m2_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vclmul_tum(vm, vd, vs2, rs1, vl); +} + +vuint64m4_t test_vclmul_vv_u64m4_tum(vbool16_t vm, vuint64m4_t vd, + vuint64m4_t vs2, vuint64m4_t vs1, + size_t vl) { + return __riscv_vclmul_tum(vm, vd, vs2, vs1, vl); +} + +vuint64m4_t test_vclmul_vx_u64m4_tum(vbool16_t vm, vuint64m4_t vd, + vuint64m4_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vclmul_tum(vm, vd, vs2, rs1, vl); +} + +vuint64m8_t test_vclmul_vv_u64m8_tum(vbool8_t vm, vuint64m8_t vd, + vuint64m8_t vs2, vuint64m8_t vs1, + size_t vl) { + return __riscv_vclmul_tum(vm, vd, vs2, vs1, vl); +} + +vuint64m8_t test_vclmul_vx_u64m8_tum(vbool8_t vm, vuint64m8_t vd, + vuint64m8_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vclmul_tum(vm, vd, vs2, rs1, vl); +} + +vuint64m1_t 
test_vclmul_vv_u64m1_tumu(vbool64_t vm, vuint64m1_t vd, + vuint64m1_t vs2, vuint64m1_t vs1, + size_t vl) { + return __riscv_vclmul_tumu(vm, vd, vs2, vs1, vl); +} + +vuint64m1_t test_vclmul_vx_u64m1_tumu(vbool64_t vm, vuint64m1_t vd, + vuint64m1_t vs2, uint64_t rs1, + size_t vl) { + return __riscv_vclmul_tumu(vm, vd, vs2, rs1, vl); +} + +vuint64m2_t test_vclmul_vv_u64m2_tumu(vbool32_t vm, vuint64m2_t vd, + vuint64m2_t vs2, vuint64m2_t vs1, + size_t vl) { + return __riscv_vclmul_tumu(vm, vd, vs2, vs1, vl); +} + +vuint64m2_t test_vclmul_vx_u64m2_tumu(vbool32_t vm, vuint64m2_t vd, + vuint64m2_t vs2, uint64_t rs1, + size_t vl) { + return __riscv_vclmul_tumu(vm, vd, vs2, rs1, vl); +} + +vuint64m4_t test_vclmul_vv_u64m4_tumu(vbool16_t vm, vuint64m4_t vd, + vuint64m4_t vs2, vuint64m4_t vs1, + size_t vl) { + return __riscv_vclmul_tumu(vm, vd, vs2, vs1, vl); +} + +vuint64m4_t test_vclmul_vx_u64m4_tumu(vbool16_t vm, vuint64m4_t vd, + vuint64m4_t vs2, uint64_t rs1, + size_t vl) { + return __riscv_vclmul_tumu(vm, vd, vs2, rs1, vl); +} + +vuint64m8_t test_vclmul_vv_u64m8_tumu(vbool8_t vm, vuint64m8_t vd, + vuint64m8_t vs2, vuint64m8_t vs1, + size_t vl) { + return __riscv_vclmul_tumu(vm, vd, vs2, vs1, vl); +} + +vuint64m8_t test_vclmul_vx_u64m8_tumu(vbool8_t vm, vuint64m8_t vd, + vuint64m8_t vs2, uint64_t rs1, + size_t vl) { + return __riscv_vclmul_tumu(vm, vd, vs2, rs1, vl); +} + +vuint64m1_t test_vclmul_vv_u64m1_mu(vbool64_t vm, vuint64m1_t vd, + vuint64m1_t vs2, vuint64m1_t vs1, + size_t vl) { + return __riscv_vclmul_mu(vm, vd, vs2, vs1, vl); +} + +vuint64m1_t test_vclmul_vx_u64m1_mu(vbool64_t vm, vuint64m1_t vd, + vuint64m1_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vclmul_mu(vm, vd, vs2, rs1, vl); +} + +vuint64m2_t test_vclmul_vv_u64m2_mu(vbool32_t vm, vuint64m2_t vd, + vuint64m2_t vs2, vuint64m2_t vs1, + size_t vl) { + return __riscv_vclmul_mu(vm, vd, vs2, vs1, vl); +} + +vuint64m2_t test_vclmul_vx_u64m2_mu(vbool32_t vm, vuint64m2_t vd, + vuint64m2_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vclmul_mu(vm, vd, vs2, rs1, vl); +} + +vuint64m4_t test_vclmul_vv_u64m4_mu(vbool16_t vm, vuint64m4_t vd, + vuint64m4_t vs2, vuint64m4_t vs1, + size_t vl) { + return __riscv_vclmul_mu(vm, vd, vs2, vs1, vl); +} + +vuint64m4_t test_vclmul_vx_u64m4_mu(vbool16_t vm, vuint64m4_t vd, + vuint64m4_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vclmul_mu(vm, vd, vs2, rs1, vl); +} + +vuint64m8_t test_vclmul_vv_u64m8_mu(vbool8_t vm, vuint64m8_t vd, + vuint64m8_t vs2, vuint64m8_t vs1, + size_t vl) { + return __riscv_vclmul_mu(vm, vd, vs2, vs1, vl); +} + +vuint64m8_t test_vclmul_vx_u64m8_mu(vbool8_t vm, vuint64m8_t vd, + vuint64m8_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vclmul_mu(vm, vd, vs2, rs1, vl); +} diff --git a/auto-generated/vector-crypto/policy_funcs/overloaded-api-testing/vclmulh.c b/auto-generated/vector-crypto/policy_funcs/overloaded-api-testing/vclmulh.c new file mode 100644 index 000000000..06a287746 --- /dev/null +++ b/auto-generated/vector-crypto/policy_funcs/overloaded-api-testing/vclmulh.c @@ -0,0 +1,182 @@ +#include +#include + +vuint64m1_t test_vclmulh_vv_u64m1_tu(vuint64m1_t vd, vuint64m1_t vs2, + vuint64m1_t vs1, size_t vl) { + return __riscv_vclmulh_tu(vd, vs2, vs1, vl); +} + +vuint64m1_t test_vclmulh_vx_u64m1_tu(vuint64m1_t vd, vuint64m1_t vs2, + uint64_t rs1, size_t vl) { + return __riscv_vclmulh_tu(vd, vs2, rs1, vl); +} + +vuint64m2_t test_vclmulh_vv_u64m2_tu(vuint64m2_t vd, vuint64m2_t vs2, + vuint64m2_t vs1, size_t vl) { + return __riscv_vclmulh_tu(vd, vs2, vs1, vl); 
+} + +vuint64m2_t test_vclmulh_vx_u64m2_tu(vuint64m2_t vd, vuint64m2_t vs2, + uint64_t rs1, size_t vl) { + return __riscv_vclmulh_tu(vd, vs2, rs1, vl); +} + +vuint64m4_t test_vclmulh_vv_u64m4_tu(vuint64m4_t vd, vuint64m4_t vs2, + vuint64m4_t vs1, size_t vl) { + return __riscv_vclmulh_tu(vd, vs2, vs1, vl); +} + +vuint64m4_t test_vclmulh_vx_u64m4_tu(vuint64m4_t vd, vuint64m4_t vs2, + uint64_t rs1, size_t vl) { + return __riscv_vclmulh_tu(vd, vs2, rs1, vl); +} + +vuint64m8_t test_vclmulh_vv_u64m8_tu(vuint64m8_t vd, vuint64m8_t vs2, + vuint64m8_t vs1, size_t vl) { + return __riscv_vclmulh_tu(vd, vs2, vs1, vl); +} + +vuint64m8_t test_vclmulh_vx_u64m8_tu(vuint64m8_t vd, vuint64m8_t vs2, + uint64_t rs1, size_t vl) { + return __riscv_vclmulh_tu(vd, vs2, rs1, vl); +} + +vuint64m1_t test_vclmulh_vv_u64m1_tum(vbool64_t vm, vuint64m1_t vd, + vuint64m1_t vs2, vuint64m1_t vs1, + size_t vl) { + return __riscv_vclmulh_tum(vm, vd, vs2, vs1, vl); +} + +vuint64m1_t test_vclmulh_vx_u64m1_tum(vbool64_t vm, vuint64m1_t vd, + vuint64m1_t vs2, uint64_t rs1, + size_t vl) { + return __riscv_vclmulh_tum(vm, vd, vs2, rs1, vl); +} + +vuint64m2_t test_vclmulh_vv_u64m2_tum(vbool32_t vm, vuint64m2_t vd, + vuint64m2_t vs2, vuint64m2_t vs1, + size_t vl) { + return __riscv_vclmulh_tum(vm, vd, vs2, vs1, vl); +} + +vuint64m2_t test_vclmulh_vx_u64m2_tum(vbool32_t vm, vuint64m2_t vd, + vuint64m2_t vs2, uint64_t rs1, + size_t vl) { + return __riscv_vclmulh_tum(vm, vd, vs2, rs1, vl); +} + +vuint64m4_t test_vclmulh_vv_u64m4_tum(vbool16_t vm, vuint64m4_t vd, + vuint64m4_t vs2, vuint64m4_t vs1, + size_t vl) { + return __riscv_vclmulh_tum(vm, vd, vs2, vs1, vl); +} + +vuint64m4_t test_vclmulh_vx_u64m4_tum(vbool16_t vm, vuint64m4_t vd, + vuint64m4_t vs2, uint64_t rs1, + size_t vl) { + return __riscv_vclmulh_tum(vm, vd, vs2, rs1, vl); +} + +vuint64m8_t test_vclmulh_vv_u64m8_tum(vbool8_t vm, vuint64m8_t vd, + vuint64m8_t vs2, vuint64m8_t vs1, + size_t vl) { + return __riscv_vclmulh_tum(vm, vd, vs2, vs1, vl); +} + +vuint64m8_t test_vclmulh_vx_u64m8_tum(vbool8_t vm, vuint64m8_t vd, + vuint64m8_t vs2, uint64_t rs1, + size_t vl) { + return __riscv_vclmulh_tum(vm, vd, vs2, rs1, vl); +} + +vuint64m1_t test_vclmulh_vv_u64m1_tumu(vbool64_t vm, vuint64m1_t vd, + vuint64m1_t vs2, vuint64m1_t vs1, + size_t vl) { + return __riscv_vclmulh_tumu(vm, vd, vs2, vs1, vl); +} + +vuint64m1_t test_vclmulh_vx_u64m1_tumu(vbool64_t vm, vuint64m1_t vd, + vuint64m1_t vs2, uint64_t rs1, + size_t vl) { + return __riscv_vclmulh_tumu(vm, vd, vs2, rs1, vl); +} + +vuint64m2_t test_vclmulh_vv_u64m2_tumu(vbool32_t vm, vuint64m2_t vd, + vuint64m2_t vs2, vuint64m2_t vs1, + size_t vl) { + return __riscv_vclmulh_tumu(vm, vd, vs2, vs1, vl); +} + +vuint64m2_t test_vclmulh_vx_u64m2_tumu(vbool32_t vm, vuint64m2_t vd, + vuint64m2_t vs2, uint64_t rs1, + size_t vl) { + return __riscv_vclmulh_tumu(vm, vd, vs2, rs1, vl); +} + +vuint64m4_t test_vclmulh_vv_u64m4_tumu(vbool16_t vm, vuint64m4_t vd, + vuint64m4_t vs2, vuint64m4_t vs1, + size_t vl) { + return __riscv_vclmulh_tumu(vm, vd, vs2, vs1, vl); +} + +vuint64m4_t test_vclmulh_vx_u64m4_tumu(vbool16_t vm, vuint64m4_t vd, + vuint64m4_t vs2, uint64_t rs1, + size_t vl) { + return __riscv_vclmulh_tumu(vm, vd, vs2, rs1, vl); +} + +vuint64m8_t test_vclmulh_vv_u64m8_tumu(vbool8_t vm, vuint64m8_t vd, + vuint64m8_t vs2, vuint64m8_t vs1, + size_t vl) { + return __riscv_vclmulh_tumu(vm, vd, vs2, vs1, vl); +} + +vuint64m8_t test_vclmulh_vx_u64m8_tumu(vbool8_t vm, vuint64m8_t vd, + vuint64m8_t vs2, uint64_t rs1, + size_t vl) { + return 
__riscv_vclmulh_tumu(vm, vd, vs2, rs1, vl); +} + +vuint64m1_t test_vclmulh_vv_u64m1_mu(vbool64_t vm, vuint64m1_t vd, + vuint64m1_t vs2, vuint64m1_t vs1, + size_t vl) { + return __riscv_vclmulh_mu(vm, vd, vs2, vs1, vl); +} + +vuint64m1_t test_vclmulh_vx_u64m1_mu(vbool64_t vm, vuint64m1_t vd, + vuint64m1_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vclmulh_mu(vm, vd, vs2, rs1, vl); +} + +vuint64m2_t test_vclmulh_vv_u64m2_mu(vbool32_t vm, vuint64m2_t vd, + vuint64m2_t vs2, vuint64m2_t vs1, + size_t vl) { + return __riscv_vclmulh_mu(vm, vd, vs2, vs1, vl); +} + +vuint64m2_t test_vclmulh_vx_u64m2_mu(vbool32_t vm, vuint64m2_t vd, + vuint64m2_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vclmulh_mu(vm, vd, vs2, rs1, vl); +} + +vuint64m4_t test_vclmulh_vv_u64m4_mu(vbool16_t vm, vuint64m4_t vd, + vuint64m4_t vs2, vuint64m4_t vs1, + size_t vl) { + return __riscv_vclmulh_mu(vm, vd, vs2, vs1, vl); +} + +vuint64m4_t test_vclmulh_vx_u64m4_mu(vbool16_t vm, vuint64m4_t vd, + vuint64m4_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vclmulh_mu(vm, vd, vs2, rs1, vl); +} + +vuint64m8_t test_vclmulh_vv_u64m8_mu(vbool8_t vm, vuint64m8_t vd, + vuint64m8_t vs2, vuint64m8_t vs1, + size_t vl) { + return __riscv_vclmulh_mu(vm, vd, vs2, vs1, vl); +} + +vuint64m8_t test_vclmulh_vx_u64m8_mu(vbool8_t vm, vuint64m8_t vd, + vuint64m8_t vs2, uint64_t rs1, size_t vl) { + return __riscv_vclmulh_mu(vm, vd, vs2, rs1, vl); +} diff --git a/auto-generated/vector-crypto/policy_funcs/overloaded-api-testing/vclz.c b/auto-generated/vector-crypto/policy_funcs/overloaded-api-testing/vclz.c new file mode 100644 index 000000000..2d8b78be7 --- /dev/null +++ b/auto-generated/vector-crypto/policy_funcs/overloaded-api-testing/vclz.c @@ -0,0 +1,423 @@ +#include +#include + +vuint8mf8_t test_vclz_v_u8mf8_tu(vuint8mf8_t vd, vuint8mf8_t vs2, size_t vl) { + return __riscv_vclz_tu(vd, vs2, vl); +} + +vuint8mf4_t test_vclz_v_u8mf4_tu(vuint8mf4_t vd, vuint8mf4_t vs2, size_t vl) { + return __riscv_vclz_tu(vd, vs2, vl); +} + +vuint8mf2_t test_vclz_v_u8mf2_tu(vuint8mf2_t vd, vuint8mf2_t vs2, size_t vl) { + return __riscv_vclz_tu(vd, vs2, vl); +} + +vuint8m1_t test_vclz_v_u8m1_tu(vuint8m1_t vd, vuint8m1_t vs2, size_t vl) { + return __riscv_vclz_tu(vd, vs2, vl); +} + +vuint8m2_t test_vclz_v_u8m2_tu(vuint8m2_t vd, vuint8m2_t vs2, size_t vl) { + return __riscv_vclz_tu(vd, vs2, vl); +} + +vuint8m4_t test_vclz_v_u8m4_tu(vuint8m4_t vd, vuint8m4_t vs2, size_t vl) { + return __riscv_vclz_tu(vd, vs2, vl); +} + +vuint8m8_t test_vclz_v_u8m8_tu(vuint8m8_t vd, vuint8m8_t vs2, size_t vl) { + return __riscv_vclz_tu(vd, vs2, vl); +} + +vuint16mf4_t test_vclz_v_u16mf4_tu(vuint16mf4_t vd, vuint16mf4_t vs2, + size_t vl) { + return __riscv_vclz_tu(vd, vs2, vl); +} + +vuint16mf2_t test_vclz_v_u16mf2_tu(vuint16mf2_t vd, vuint16mf2_t vs2, + size_t vl) { + return __riscv_vclz_tu(vd, vs2, vl); +} + +vuint16m1_t test_vclz_v_u16m1_tu(vuint16m1_t vd, vuint16m1_t vs2, size_t vl) { + return __riscv_vclz_tu(vd, vs2, vl); +} + +vuint16m2_t test_vclz_v_u16m2_tu(vuint16m2_t vd, vuint16m2_t vs2, size_t vl) { + return __riscv_vclz_tu(vd, vs2, vl); +} + +vuint16m4_t test_vclz_v_u16m4_tu(vuint16m4_t vd, vuint16m4_t vs2, size_t vl) { + return __riscv_vclz_tu(vd, vs2, vl); +} + +vuint16m8_t test_vclz_v_u16m8_tu(vuint16m8_t vd, vuint16m8_t vs2, size_t vl) { + return __riscv_vclz_tu(vd, vs2, vl); +} + +vuint32mf2_t test_vclz_v_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, + size_t vl) { + return __riscv_vclz_tu(vd, vs2, vl); +} + +vuint32m1_t test_vclz_v_u32m1_tu(vuint32m1_t vd, 
vuint32m1_t vs2, size_t vl) { + return __riscv_vclz_tu(vd, vs2, vl); +} + +vuint32m2_t test_vclz_v_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vclz_tu(vd, vs2, vl); +} + +vuint32m4_t test_vclz_v_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vclz_tu(vd, vs2, vl); +} + +vuint32m8_t test_vclz_v_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { + return __riscv_vclz_tu(vd, vs2, vl); +} + +vuint64m1_t test_vclz_v_u64m1_tu(vuint64m1_t vd, vuint64m1_t vs2, size_t vl) { + return __riscv_vclz_tu(vd, vs2, vl); +} + +vuint64m2_t test_vclz_v_u64m2_tu(vuint64m2_t vd, vuint64m2_t vs2, size_t vl) { + return __riscv_vclz_tu(vd, vs2, vl); +} + +vuint64m4_t test_vclz_v_u64m4_tu(vuint64m4_t vd, vuint64m4_t vs2, size_t vl) { + return __riscv_vclz_tu(vd, vs2, vl); +} + +vuint64m8_t test_vclz_v_u64m8_tu(vuint64m8_t vd, vuint64m8_t vs2, size_t vl) { + return __riscv_vclz_tu(vd, vs2, vl); +} + +vuint8mf8_t test_vclz_v_u8mf8_tum(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, + size_t vl) { + return __riscv_vclz_tum(vm, vd, vs2, vl); +} + +vuint8mf4_t test_vclz_v_u8mf4_tum(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, + size_t vl) { + return __riscv_vclz_tum(vm, vd, vs2, vl); +} + +vuint8mf2_t test_vclz_v_u8mf2_tum(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, + size_t vl) { + return __riscv_vclz_tum(vm, vd, vs2, vl); +} + +vuint8m1_t test_vclz_v_u8m1_tum(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, + size_t vl) { + return __riscv_vclz_tum(vm, vd, vs2, vl); +} + +vuint8m2_t test_vclz_v_u8m2_tum(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, + size_t vl) { + return __riscv_vclz_tum(vm, vd, vs2, vl); +} + +vuint8m4_t test_vclz_v_u8m4_tum(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, + size_t vl) { + return __riscv_vclz_tum(vm, vd, vs2, vl); +} + +vuint8m8_t test_vclz_v_u8m8_tum(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, + size_t vl) { + return __riscv_vclz_tum(vm, vd, vs2, vl); +} + +vuint16mf4_t test_vclz_v_u16mf4_tum(vbool64_t vm, vuint16mf4_t vd, + vuint16mf4_t vs2, size_t vl) { + return __riscv_vclz_tum(vm, vd, vs2, vl); +} + +vuint16mf2_t test_vclz_v_u16mf2_tum(vbool32_t vm, vuint16mf2_t vd, + vuint16mf2_t vs2, size_t vl) { + return __riscv_vclz_tum(vm, vd, vs2, vl); +} + +vuint16m1_t test_vclz_v_u16m1_tum(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, + size_t vl) { + return __riscv_vclz_tum(vm, vd, vs2, vl); +} + +vuint16m2_t test_vclz_v_u16m2_tum(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, + size_t vl) { + return __riscv_vclz_tum(vm, vd, vs2, vl); +} + +vuint16m4_t test_vclz_v_u16m4_tum(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, + size_t vl) { + return __riscv_vclz_tum(vm, vd, vs2, vl); +} + +vuint16m8_t test_vclz_v_u16m8_tum(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, + size_t vl) { + return __riscv_vclz_tum(vm, vd, vs2, vl); +} + +vuint32mf2_t test_vclz_v_u32mf2_tum(vbool64_t vm, vuint32mf2_t vd, + vuint32mf2_t vs2, size_t vl) { + return __riscv_vclz_tum(vm, vd, vs2, vl); +} + +vuint32m1_t test_vclz_v_u32m1_tum(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, + size_t vl) { + return __riscv_vclz_tum(vm, vd, vs2, vl); +} + +vuint32m2_t test_vclz_v_u32m2_tum(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, + size_t vl) { + return __riscv_vclz_tum(vm, vd, vs2, vl); +} + +vuint32m4_t test_vclz_v_u32m4_tum(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, + size_t vl) { + return __riscv_vclz_tum(vm, vd, vs2, vl); +} + +vuint32m8_t test_vclz_v_u32m8_tum(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, + size_t vl) { + return __riscv_vclz_tum(vm, vd, vs2, vl); +} 
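As with the other policy files, the `_tum` tests only exercise the prototypes. The short hand-written sketch below (again not part of the generated diff; the helper name and the non-zero mask are illustrative assumptions, and Zvbb is assumed) shows the tail-undisturbed, masked form: tail lanes at positions `>= vl` keep the values already in `vd`, while masked-off lanes are mask-agnostic and should not be relied upon afterwards.

#include <riscv_vector.h>
#include <stdint.h>

/* Hypothetical example: per-lane leading-zero count of vs2, computed only for
 * the lanes where vs2 is non-zero (where clz would otherwise saturate at the
 * element width).  "_tum" preserves the tail lanes of vd; masked-off lanes are
 * mask-agnostic. */
vuint32m1_t clz_nonzero_lanes(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) {
  vbool32_t vm = __riscv_vmsne_vx_u32m1_b32(vs2, 0, vl); /* lanes with vs2 != 0 */
  return __riscv_vclz_tum(vm, vd, vs2, vl);              /* same overload as above */
}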
+ +vuint64m1_t test_vclz_v_u64m1_tum(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, + size_t vl) { + return __riscv_vclz_tum(vm, vd, vs2, vl); +} + +vuint64m2_t test_vclz_v_u64m2_tum(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, + size_t vl) { + return __riscv_vclz_tum(vm, vd, vs2, vl); +} + +vuint64m4_t test_vclz_v_u64m4_tum(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, + size_t vl) { + return __riscv_vclz_tum(vm, vd, vs2, vl); +} + +vuint64m8_t test_vclz_v_u64m8_tum(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, + size_t vl) { + return __riscv_vclz_tum(vm, vd, vs2, vl); +} + +vuint8mf8_t test_vclz_v_u8mf8_tumu(vbool64_t vm, vuint8mf8_t vd, + vuint8mf8_t vs2, size_t vl) { + return __riscv_vclz_tumu(vm, vd, vs2, vl); +} + +vuint8mf4_t test_vclz_v_u8mf4_tumu(vbool32_t vm, vuint8mf4_t vd, + vuint8mf4_t vs2, size_t vl) { + return __riscv_vclz_tumu(vm, vd, vs2, vl); +} + +vuint8mf2_t test_vclz_v_u8mf2_tumu(vbool16_t vm, vuint8mf2_t vd, + vuint8mf2_t vs2, size_t vl) { + return __riscv_vclz_tumu(vm, vd, vs2, vl); +} + +vuint8m1_t test_vclz_v_u8m1_tumu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, + size_t vl) { + return __riscv_vclz_tumu(vm, vd, vs2, vl); +} + +vuint8m2_t test_vclz_v_u8m2_tumu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, + size_t vl) { + return __riscv_vclz_tumu(vm, vd, vs2, vl); +} + +vuint8m4_t test_vclz_v_u8m4_tumu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, + size_t vl) { + return __riscv_vclz_tumu(vm, vd, vs2, vl); +} + +vuint8m8_t test_vclz_v_u8m8_tumu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, + size_t vl) { + return __riscv_vclz_tumu(vm, vd, vs2, vl); +} + +vuint16mf4_t test_vclz_v_u16mf4_tumu(vbool64_t vm, vuint16mf4_t vd, + vuint16mf4_t vs2, size_t vl) { + return __riscv_vclz_tumu(vm, vd, vs2, vl); +} + +vuint16mf2_t test_vclz_v_u16mf2_tumu(vbool32_t vm, vuint16mf2_t vd, + vuint16mf2_t vs2, size_t vl) { + return __riscv_vclz_tumu(vm, vd, vs2, vl); +} + +vuint16m1_t test_vclz_v_u16m1_tumu(vbool16_t vm, vuint16m1_t vd, + vuint16m1_t vs2, size_t vl) { + return __riscv_vclz_tumu(vm, vd, vs2, vl); +} + +vuint16m2_t test_vclz_v_u16m2_tumu(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, + size_t vl) { + return __riscv_vclz_tumu(vm, vd, vs2, vl); +} + +vuint16m4_t test_vclz_v_u16m4_tumu(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, + size_t vl) { + return __riscv_vclz_tumu(vm, vd, vs2, vl); +} + +vuint16m8_t test_vclz_v_u16m8_tumu(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, + size_t vl) { + return __riscv_vclz_tumu(vm, vd, vs2, vl); +} + +vuint32mf2_t test_vclz_v_u32mf2_tumu(vbool64_t vm, vuint32mf2_t vd, + vuint32mf2_t vs2, size_t vl) { + return __riscv_vclz_tumu(vm, vd, vs2, vl); +} + +vuint32m1_t test_vclz_v_u32m1_tumu(vbool32_t vm, vuint32m1_t vd, + vuint32m1_t vs2, size_t vl) { + return __riscv_vclz_tumu(vm, vd, vs2, vl); +} + +vuint32m2_t test_vclz_v_u32m2_tumu(vbool16_t vm, vuint32m2_t vd, + vuint32m2_t vs2, size_t vl) { + return __riscv_vclz_tumu(vm, vd, vs2, vl); +} + +vuint32m4_t test_vclz_v_u32m4_tumu(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, + size_t vl) { + return __riscv_vclz_tumu(vm, vd, vs2, vl); +} + +vuint32m8_t test_vclz_v_u32m8_tumu(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, + size_t vl) { + return __riscv_vclz_tumu(vm, vd, vs2, vl); +} + +vuint64m1_t test_vclz_v_u64m1_tumu(vbool64_t vm, vuint64m1_t vd, + vuint64m1_t vs2, size_t vl) { + return __riscv_vclz_tumu(vm, vd, vs2, vl); +} + +vuint64m2_t test_vclz_v_u64m2_tumu(vbool32_t vm, vuint64m2_t vd, + vuint64m2_t vs2, size_t vl) { + return __riscv_vclz_tumu(vm, vd, vs2, vl); +} + +vuint64m4_t 
test_vclz_v_u64m4_tumu(vbool16_t vm, vuint64m4_t vd, + vuint64m4_t vs2, size_t vl) { + return __riscv_vclz_tumu(vm, vd, vs2, vl); +} + +vuint64m8_t test_vclz_v_u64m8_tumu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, + size_t vl) { + return __riscv_vclz_tumu(vm, vd, vs2, vl); +} + +vuint8mf8_t test_vclz_v_u8mf8_mu(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, + size_t vl) { + return __riscv_vclz_mu(vm, vd, vs2, vl); +} + +vuint8mf4_t test_vclz_v_u8mf4_mu(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, + size_t vl) { + return __riscv_vclz_mu(vm, vd, vs2, vl); +} + +vuint8mf2_t test_vclz_v_u8mf2_mu(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, + size_t vl) { + return __riscv_vclz_mu(vm, vd, vs2, vl); +} + +vuint8m1_t test_vclz_v_u8m1_mu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, + size_t vl) { + return __riscv_vclz_mu(vm, vd, vs2, vl); +} + +vuint8m2_t test_vclz_v_u8m2_mu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, + size_t vl) { + return __riscv_vclz_mu(vm, vd, vs2, vl); +} + +vuint8m4_t test_vclz_v_u8m4_mu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, + size_t vl) { + return __riscv_vclz_mu(vm, vd, vs2, vl); +} + +vuint8m8_t test_vclz_v_u8m8_mu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, + size_t vl) { + return __riscv_vclz_mu(vm, vd, vs2, vl); +} + +vuint16mf4_t test_vclz_v_u16mf4_mu(vbool64_t vm, vuint16mf4_t vd, + vuint16mf4_t vs2, size_t vl) { + return __riscv_vclz_mu(vm, vd, vs2, vl); +} + +vuint16mf2_t test_vclz_v_u16mf2_mu(vbool32_t vm, vuint16mf2_t vd, + vuint16mf2_t vs2, size_t vl) { + return __riscv_vclz_mu(vm, vd, vs2, vl); +} + +vuint16m1_t test_vclz_v_u16m1_mu(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, + size_t vl) { + return __riscv_vclz_mu(vm, vd, vs2, vl); +} + +vuint16m2_t test_vclz_v_u16m2_mu(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, + size_t vl) { + return __riscv_vclz_mu(vm, vd, vs2, vl); +} + +vuint16m4_t test_vclz_v_u16m4_mu(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, + size_t vl) { + return __riscv_vclz_mu(vm, vd, vs2, vl); +} + +vuint16m8_t test_vclz_v_u16m8_mu(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, + size_t vl) { + return __riscv_vclz_mu(vm, vd, vs2, vl); +} + +vuint32mf2_t test_vclz_v_u32mf2_mu(vbool64_t vm, vuint32mf2_t vd, + vuint32mf2_t vs2, size_t vl) { + return __riscv_vclz_mu(vm, vd, vs2, vl); +} + +vuint32m1_t test_vclz_v_u32m1_mu(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, + size_t vl) { + return __riscv_vclz_mu(vm, vd, vs2, vl); +} + +vuint32m2_t test_vclz_v_u32m2_mu(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, + size_t vl) { + return __riscv_vclz_mu(vm, vd, vs2, vl); +} + +vuint32m4_t test_vclz_v_u32m4_mu(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, + size_t vl) { + return __riscv_vclz_mu(vm, vd, vs2, vl); +} + +vuint32m8_t test_vclz_v_u32m8_mu(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, + size_t vl) { + return __riscv_vclz_mu(vm, vd, vs2, vl); +} + +vuint64m1_t test_vclz_v_u64m1_mu(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, + size_t vl) { + return __riscv_vclz_mu(vm, vd, vs2, vl); +} + +vuint64m2_t test_vclz_v_u64m2_mu(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, + size_t vl) { + return __riscv_vclz_mu(vm, vd, vs2, vl); +} + +vuint64m4_t test_vclz_v_u64m4_mu(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, + size_t vl) { + return __riscv_vclz_mu(vm, vd, vs2, vl); +} + +vuint64m8_t test_vclz_v_u64m8_mu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, + size_t vl) { + return __riscv_vclz_mu(vm, vd, vs2, vl); +} diff --git a/auto-generated/vector-crypto/policy_funcs/overloaded-api-testing/vcpop.c 
b/auto-generated/vector-crypto/policy_funcs/overloaded-api-testing/vcpop.c new file mode 100644 index 000000000..10f897107 --- /dev/null +++ b/auto-generated/vector-crypto/policy_funcs/overloaded-api-testing/vcpop.c @@ -0,0 +1,423 @@ +#include +#include + +vuint8mf8_t test_vcpop_v_u8mf8_tu(vuint8mf8_t vd, vuint8mf8_t vs2, size_t vl) { + return __riscv_vcpop_tu(vd, vs2, vl); +} + +vuint8mf4_t test_vcpop_v_u8mf4_tu(vuint8mf4_t vd, vuint8mf4_t vs2, size_t vl) { + return __riscv_vcpop_tu(vd, vs2, vl); +} + +vuint8mf2_t test_vcpop_v_u8mf2_tu(vuint8mf2_t vd, vuint8mf2_t vs2, size_t vl) { + return __riscv_vcpop_tu(vd, vs2, vl); +} + +vuint8m1_t test_vcpop_v_u8m1_tu(vuint8m1_t vd, vuint8m1_t vs2, size_t vl) { + return __riscv_vcpop_tu(vd, vs2, vl); +} + +vuint8m2_t test_vcpop_v_u8m2_tu(vuint8m2_t vd, vuint8m2_t vs2, size_t vl) { + return __riscv_vcpop_tu(vd, vs2, vl); +} + +vuint8m4_t test_vcpop_v_u8m4_tu(vuint8m4_t vd, vuint8m4_t vs2, size_t vl) { + return __riscv_vcpop_tu(vd, vs2, vl); +} + +vuint8m8_t test_vcpop_v_u8m8_tu(vuint8m8_t vd, vuint8m8_t vs2, size_t vl) { + return __riscv_vcpop_tu(vd, vs2, vl); +} + +vuint16mf4_t test_vcpop_v_u16mf4_tu(vuint16mf4_t vd, vuint16mf4_t vs2, + size_t vl) { + return __riscv_vcpop_tu(vd, vs2, vl); +} + +vuint16mf2_t test_vcpop_v_u16mf2_tu(vuint16mf2_t vd, vuint16mf2_t vs2, + size_t vl) { + return __riscv_vcpop_tu(vd, vs2, vl); +} + +vuint16m1_t test_vcpop_v_u16m1_tu(vuint16m1_t vd, vuint16m1_t vs2, size_t vl) { + return __riscv_vcpop_tu(vd, vs2, vl); +} + +vuint16m2_t test_vcpop_v_u16m2_tu(vuint16m2_t vd, vuint16m2_t vs2, size_t vl) { + return __riscv_vcpop_tu(vd, vs2, vl); +} + +vuint16m4_t test_vcpop_v_u16m4_tu(vuint16m4_t vd, vuint16m4_t vs2, size_t vl) { + return __riscv_vcpop_tu(vd, vs2, vl); +} + +vuint16m8_t test_vcpop_v_u16m8_tu(vuint16m8_t vd, vuint16m8_t vs2, size_t vl) { + return __riscv_vcpop_tu(vd, vs2, vl); +} + +vuint32mf2_t test_vcpop_v_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, + size_t vl) { + return __riscv_vcpop_tu(vd, vs2, vl); +} + +vuint32m1_t test_vcpop_v_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vcpop_tu(vd, vs2, vl); +} + +vuint32m2_t test_vcpop_v_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vcpop_tu(vd, vs2, vl); +} + +vuint32m4_t test_vcpop_v_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vcpop_tu(vd, vs2, vl); +} + +vuint32m8_t test_vcpop_v_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { + return __riscv_vcpop_tu(vd, vs2, vl); +} + +vuint64m1_t test_vcpop_v_u64m1_tu(vuint64m1_t vd, vuint64m1_t vs2, size_t vl) { + return __riscv_vcpop_tu(vd, vs2, vl); +} + +vuint64m2_t test_vcpop_v_u64m2_tu(vuint64m2_t vd, vuint64m2_t vs2, size_t vl) { + return __riscv_vcpop_tu(vd, vs2, vl); +} + +vuint64m4_t test_vcpop_v_u64m4_tu(vuint64m4_t vd, vuint64m4_t vs2, size_t vl) { + return __riscv_vcpop_tu(vd, vs2, vl); +} + +vuint64m8_t test_vcpop_v_u64m8_tu(vuint64m8_t vd, vuint64m8_t vs2, size_t vl) { + return __riscv_vcpop_tu(vd, vs2, vl); +} + +vuint8mf8_t test_vcpop_v_u8mf8_tum(vbool64_t vm, vuint8mf8_t vd, + vuint8mf8_t vs2, size_t vl) { + return __riscv_vcpop_tum(vm, vd, vs2, vl); +} + +vuint8mf4_t test_vcpop_v_u8mf4_tum(vbool32_t vm, vuint8mf4_t vd, + vuint8mf4_t vs2, size_t vl) { + return __riscv_vcpop_tum(vm, vd, vs2, vl); +} + +vuint8mf2_t test_vcpop_v_u8mf2_tum(vbool16_t vm, vuint8mf2_t vd, + vuint8mf2_t vs2, size_t vl) { + return __riscv_vcpop_tum(vm, vd, vs2, vl); +} + +vuint8m1_t test_vcpop_v_u8m1_tum(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, + 
size_t vl) { + return __riscv_vcpop_tum(vm, vd, vs2, vl); +} + +vuint8m2_t test_vcpop_v_u8m2_tum(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, + size_t vl) { + return __riscv_vcpop_tum(vm, vd, vs2, vl); +} + +vuint8m4_t test_vcpop_v_u8m4_tum(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, + size_t vl) { + return __riscv_vcpop_tum(vm, vd, vs2, vl); +} + +vuint8m8_t test_vcpop_v_u8m8_tum(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, + size_t vl) { + return __riscv_vcpop_tum(vm, vd, vs2, vl); +} + +vuint16mf4_t test_vcpop_v_u16mf4_tum(vbool64_t vm, vuint16mf4_t vd, + vuint16mf4_t vs2, size_t vl) { + return __riscv_vcpop_tum(vm, vd, vs2, vl); +} + +vuint16mf2_t test_vcpop_v_u16mf2_tum(vbool32_t vm, vuint16mf2_t vd, + vuint16mf2_t vs2, size_t vl) { + return __riscv_vcpop_tum(vm, vd, vs2, vl); +} + +vuint16m1_t test_vcpop_v_u16m1_tum(vbool16_t vm, vuint16m1_t vd, + vuint16m1_t vs2, size_t vl) { + return __riscv_vcpop_tum(vm, vd, vs2, vl); +} + +vuint16m2_t test_vcpop_v_u16m2_tum(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, + size_t vl) { + return __riscv_vcpop_tum(vm, vd, vs2, vl); +} + +vuint16m4_t test_vcpop_v_u16m4_tum(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, + size_t vl) { + return __riscv_vcpop_tum(vm, vd, vs2, vl); +} + +vuint16m8_t test_vcpop_v_u16m8_tum(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, + size_t vl) { + return __riscv_vcpop_tum(vm, vd, vs2, vl); +} + +vuint32mf2_t test_vcpop_v_u32mf2_tum(vbool64_t vm, vuint32mf2_t vd, + vuint32mf2_t vs2, size_t vl) { + return __riscv_vcpop_tum(vm, vd, vs2, vl); +} + +vuint32m1_t test_vcpop_v_u32m1_tum(vbool32_t vm, vuint32m1_t vd, + vuint32m1_t vs2, size_t vl) { + return __riscv_vcpop_tum(vm, vd, vs2, vl); +} + +vuint32m2_t test_vcpop_v_u32m2_tum(vbool16_t vm, vuint32m2_t vd, + vuint32m2_t vs2, size_t vl) { + return __riscv_vcpop_tum(vm, vd, vs2, vl); +} + +vuint32m4_t test_vcpop_v_u32m4_tum(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, + size_t vl) { + return __riscv_vcpop_tum(vm, vd, vs2, vl); +} + +vuint32m8_t test_vcpop_v_u32m8_tum(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, + size_t vl) { + return __riscv_vcpop_tum(vm, vd, vs2, vl); +} + +vuint64m1_t test_vcpop_v_u64m1_tum(vbool64_t vm, vuint64m1_t vd, + vuint64m1_t vs2, size_t vl) { + return __riscv_vcpop_tum(vm, vd, vs2, vl); +} + +vuint64m2_t test_vcpop_v_u64m2_tum(vbool32_t vm, vuint64m2_t vd, + vuint64m2_t vs2, size_t vl) { + return __riscv_vcpop_tum(vm, vd, vs2, vl); +} + +vuint64m4_t test_vcpop_v_u64m4_tum(vbool16_t vm, vuint64m4_t vd, + vuint64m4_t vs2, size_t vl) { + return __riscv_vcpop_tum(vm, vd, vs2, vl); +} + +vuint64m8_t test_vcpop_v_u64m8_tum(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, + size_t vl) { + return __riscv_vcpop_tum(vm, vd, vs2, vl); +} + +vuint8mf8_t test_vcpop_v_u8mf8_tumu(vbool64_t vm, vuint8mf8_t vd, + vuint8mf8_t vs2, size_t vl) { + return __riscv_vcpop_tumu(vm, vd, vs2, vl); +} + +vuint8mf4_t test_vcpop_v_u8mf4_tumu(vbool32_t vm, vuint8mf4_t vd, + vuint8mf4_t vs2, size_t vl) { + return __riscv_vcpop_tumu(vm, vd, vs2, vl); +} + +vuint8mf2_t test_vcpop_v_u8mf2_tumu(vbool16_t vm, vuint8mf2_t vd, + vuint8mf2_t vs2, size_t vl) { + return __riscv_vcpop_tumu(vm, vd, vs2, vl); +} + +vuint8m1_t test_vcpop_v_u8m1_tumu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, + size_t vl) { + return __riscv_vcpop_tumu(vm, vd, vs2, vl); +} + +vuint8m2_t test_vcpop_v_u8m2_tumu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, + size_t vl) { + return __riscv_vcpop_tumu(vm, vd, vs2, vl); +} + +vuint8m4_t test_vcpop_v_u8m4_tumu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, + size_t vl) { + 
return __riscv_vcpop_tumu(vm, vd, vs2, vl); +} + +vuint8m8_t test_vcpop_v_u8m8_tumu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, + size_t vl) { + return __riscv_vcpop_tumu(vm, vd, vs2, vl); +} + +vuint16mf4_t test_vcpop_v_u16mf4_tumu(vbool64_t vm, vuint16mf4_t vd, + vuint16mf4_t vs2, size_t vl) { + return __riscv_vcpop_tumu(vm, vd, vs2, vl); +} + +vuint16mf2_t test_vcpop_v_u16mf2_tumu(vbool32_t vm, vuint16mf2_t vd, + vuint16mf2_t vs2, size_t vl) { + return __riscv_vcpop_tumu(vm, vd, vs2, vl); +} + +vuint16m1_t test_vcpop_v_u16m1_tumu(vbool16_t vm, vuint16m1_t vd, + vuint16m1_t vs2, size_t vl) { + return __riscv_vcpop_tumu(vm, vd, vs2, vl); +} + +vuint16m2_t test_vcpop_v_u16m2_tumu(vbool8_t vm, vuint16m2_t vd, + vuint16m2_t vs2, size_t vl) { + return __riscv_vcpop_tumu(vm, vd, vs2, vl); +} + +vuint16m4_t test_vcpop_v_u16m4_tumu(vbool4_t vm, vuint16m4_t vd, + vuint16m4_t vs2, size_t vl) { + return __riscv_vcpop_tumu(vm, vd, vs2, vl); +} + +vuint16m8_t test_vcpop_v_u16m8_tumu(vbool2_t vm, vuint16m8_t vd, + vuint16m8_t vs2, size_t vl) { + return __riscv_vcpop_tumu(vm, vd, vs2, vl); +} + +vuint32mf2_t test_vcpop_v_u32mf2_tumu(vbool64_t vm, vuint32mf2_t vd, + vuint32mf2_t vs2, size_t vl) { + return __riscv_vcpop_tumu(vm, vd, vs2, vl); +} + +vuint32m1_t test_vcpop_v_u32m1_tumu(vbool32_t vm, vuint32m1_t vd, + vuint32m1_t vs2, size_t vl) { + return __riscv_vcpop_tumu(vm, vd, vs2, vl); +} + +vuint32m2_t test_vcpop_v_u32m2_tumu(vbool16_t vm, vuint32m2_t vd, + vuint32m2_t vs2, size_t vl) { + return __riscv_vcpop_tumu(vm, vd, vs2, vl); +} + +vuint32m4_t test_vcpop_v_u32m4_tumu(vbool8_t vm, vuint32m4_t vd, + vuint32m4_t vs2, size_t vl) { + return __riscv_vcpop_tumu(vm, vd, vs2, vl); +} + +vuint32m8_t test_vcpop_v_u32m8_tumu(vbool4_t vm, vuint32m8_t vd, + vuint32m8_t vs2, size_t vl) { + return __riscv_vcpop_tumu(vm, vd, vs2, vl); +} + +vuint64m1_t test_vcpop_v_u64m1_tumu(vbool64_t vm, vuint64m1_t vd, + vuint64m1_t vs2, size_t vl) { + return __riscv_vcpop_tumu(vm, vd, vs2, vl); +} + +vuint64m2_t test_vcpop_v_u64m2_tumu(vbool32_t vm, vuint64m2_t vd, + vuint64m2_t vs2, size_t vl) { + return __riscv_vcpop_tumu(vm, vd, vs2, vl); +} + +vuint64m4_t test_vcpop_v_u64m4_tumu(vbool16_t vm, vuint64m4_t vd, + vuint64m4_t vs2, size_t vl) { + return __riscv_vcpop_tumu(vm, vd, vs2, vl); +} + +vuint64m8_t test_vcpop_v_u64m8_tumu(vbool8_t vm, vuint64m8_t vd, + vuint64m8_t vs2, size_t vl) { + return __riscv_vcpop_tumu(vm, vd, vs2, vl); +} + +vuint8mf8_t test_vcpop_v_u8mf8_mu(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, + size_t vl) { + return __riscv_vcpop_mu(vm, vd, vs2, vl); +} + +vuint8mf4_t test_vcpop_v_u8mf4_mu(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, + size_t vl) { + return __riscv_vcpop_mu(vm, vd, vs2, vl); +} + +vuint8mf2_t test_vcpop_v_u8mf2_mu(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, + size_t vl) { + return __riscv_vcpop_mu(vm, vd, vs2, vl); +} + +vuint8m1_t test_vcpop_v_u8m1_mu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, + size_t vl) { + return __riscv_vcpop_mu(vm, vd, vs2, vl); +} + +vuint8m2_t test_vcpop_v_u8m2_mu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, + size_t vl) { + return __riscv_vcpop_mu(vm, vd, vs2, vl); +} + +vuint8m4_t test_vcpop_v_u8m4_mu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, + size_t vl) { + return __riscv_vcpop_mu(vm, vd, vs2, vl); +} + +vuint8m8_t test_vcpop_v_u8m8_mu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, + size_t vl) { + return __riscv_vcpop_mu(vm, vd, vs2, vl); +} + +vuint16mf4_t test_vcpop_v_u16mf4_mu(vbool64_t vm, vuint16mf4_t vd, + vuint16mf4_t vs2, size_t vl) { + 
return __riscv_vcpop_mu(vm, vd, vs2, vl); +} + +vuint16mf2_t test_vcpop_v_u16mf2_mu(vbool32_t vm, vuint16mf2_t vd, + vuint16mf2_t vs2, size_t vl) { + return __riscv_vcpop_mu(vm, vd, vs2, vl); +} + +vuint16m1_t test_vcpop_v_u16m1_mu(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, + size_t vl) { + return __riscv_vcpop_mu(vm, vd, vs2, vl); +} + +vuint16m2_t test_vcpop_v_u16m2_mu(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, + size_t vl) { + return __riscv_vcpop_mu(vm, vd, vs2, vl); +} + +vuint16m4_t test_vcpop_v_u16m4_mu(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, + size_t vl) { + return __riscv_vcpop_mu(vm, vd, vs2, vl); +} + +vuint16m8_t test_vcpop_v_u16m8_mu(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, + size_t vl) { + return __riscv_vcpop_mu(vm, vd, vs2, vl); +} + +vuint32mf2_t test_vcpop_v_u32mf2_mu(vbool64_t vm, vuint32mf2_t vd, + vuint32mf2_t vs2, size_t vl) { + return __riscv_vcpop_mu(vm, vd, vs2, vl); +} + +vuint32m1_t test_vcpop_v_u32m1_mu(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, + size_t vl) { + return __riscv_vcpop_mu(vm, vd, vs2, vl); +} + +vuint32m2_t test_vcpop_v_u32m2_mu(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, + size_t vl) { + return __riscv_vcpop_mu(vm, vd, vs2, vl); +} + +vuint32m4_t test_vcpop_v_u32m4_mu(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, + size_t vl) { + return __riscv_vcpop_mu(vm, vd, vs2, vl); +} + +vuint32m8_t test_vcpop_v_u32m8_mu(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, + size_t vl) { + return __riscv_vcpop_mu(vm, vd, vs2, vl); +} + +vuint64m1_t test_vcpop_v_u64m1_mu(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, + size_t vl) { + return __riscv_vcpop_mu(vm, vd, vs2, vl); +} + +vuint64m2_t test_vcpop_v_u64m2_mu(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, + size_t vl) { + return __riscv_vcpop_mu(vm, vd, vs2, vl); +} + +vuint64m4_t test_vcpop_v_u64m4_mu(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, + size_t vl) { + return __riscv_vcpop_mu(vm, vd, vs2, vl); +} + +vuint64m8_t test_vcpop_v_u64m8_mu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, + size_t vl) { + return __riscv_vcpop_mu(vm, vd, vs2, vl); +} diff --git a/auto-generated/vector-crypto/policy_funcs/overloaded-api-testing/vctz.c b/auto-generated/vector-crypto/policy_funcs/overloaded-api-testing/vctz.c new file mode 100644 index 000000000..3e0bce679 --- /dev/null +++ b/auto-generated/vector-crypto/policy_funcs/overloaded-api-testing/vctz.c @@ -0,0 +1,423 @@ +#include +#include + +vuint8mf8_t test_vctz_v_u8mf8_tu(vuint8mf8_t vd, vuint8mf8_t vs2, size_t vl) { + return __riscv_vctz_tu(vd, vs2, vl); +} + +vuint8mf4_t test_vctz_v_u8mf4_tu(vuint8mf4_t vd, vuint8mf4_t vs2, size_t vl) { + return __riscv_vctz_tu(vd, vs2, vl); +} + +vuint8mf2_t test_vctz_v_u8mf2_tu(vuint8mf2_t vd, vuint8mf2_t vs2, size_t vl) { + return __riscv_vctz_tu(vd, vs2, vl); +} + +vuint8m1_t test_vctz_v_u8m1_tu(vuint8m1_t vd, vuint8m1_t vs2, size_t vl) { + return __riscv_vctz_tu(vd, vs2, vl); +} + +vuint8m2_t test_vctz_v_u8m2_tu(vuint8m2_t vd, vuint8m2_t vs2, size_t vl) { + return __riscv_vctz_tu(vd, vs2, vl); +} + +vuint8m4_t test_vctz_v_u8m4_tu(vuint8m4_t vd, vuint8m4_t vs2, size_t vl) { + return __riscv_vctz_tu(vd, vs2, vl); +} + +vuint8m8_t test_vctz_v_u8m8_tu(vuint8m8_t vd, vuint8m8_t vs2, size_t vl) { + return __riscv_vctz_tu(vd, vs2, vl); +} + +vuint16mf4_t test_vctz_v_u16mf4_tu(vuint16mf4_t vd, vuint16mf4_t vs2, + size_t vl) { + return __riscv_vctz_tu(vd, vs2, vl); +} + +vuint16mf2_t test_vctz_v_u16mf2_tu(vuint16mf2_t vd, vuint16mf2_t vs2, + size_t vl) { + return __riscv_vctz_tu(vd, vs2, vl); +} + +vuint16m1_t 
test_vctz_v_u16m1_tu(vuint16m1_t vd, vuint16m1_t vs2, size_t vl) { + return __riscv_vctz_tu(vd, vs2, vl); +} + +vuint16m2_t test_vctz_v_u16m2_tu(vuint16m2_t vd, vuint16m2_t vs2, size_t vl) { + return __riscv_vctz_tu(vd, vs2, vl); +} + +vuint16m4_t test_vctz_v_u16m4_tu(vuint16m4_t vd, vuint16m4_t vs2, size_t vl) { + return __riscv_vctz_tu(vd, vs2, vl); +} + +vuint16m8_t test_vctz_v_u16m8_tu(vuint16m8_t vd, vuint16m8_t vs2, size_t vl) { + return __riscv_vctz_tu(vd, vs2, vl); +} + +vuint32mf2_t test_vctz_v_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, + size_t vl) { + return __riscv_vctz_tu(vd, vs2, vl); +} + +vuint32m1_t test_vctz_v_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vctz_tu(vd, vs2, vl); +} + +vuint32m2_t test_vctz_v_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vctz_tu(vd, vs2, vl); +} + +vuint32m4_t test_vctz_v_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vctz_tu(vd, vs2, vl); +} + +vuint32m8_t test_vctz_v_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { + return __riscv_vctz_tu(vd, vs2, vl); +} + +vuint64m1_t test_vctz_v_u64m1_tu(vuint64m1_t vd, vuint64m1_t vs2, size_t vl) { + return __riscv_vctz_tu(vd, vs2, vl); +} + +vuint64m2_t test_vctz_v_u64m2_tu(vuint64m2_t vd, vuint64m2_t vs2, size_t vl) { + return __riscv_vctz_tu(vd, vs2, vl); +} + +vuint64m4_t test_vctz_v_u64m4_tu(vuint64m4_t vd, vuint64m4_t vs2, size_t vl) { + return __riscv_vctz_tu(vd, vs2, vl); +} + +vuint64m8_t test_vctz_v_u64m8_tu(vuint64m8_t vd, vuint64m8_t vs2, size_t vl) { + return __riscv_vctz_tu(vd, vs2, vl); +} + +vuint8mf8_t test_vctz_v_u8mf8_tum(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, + size_t vl) { + return __riscv_vctz_tum(vm, vd, vs2, vl); +} + +vuint8mf4_t test_vctz_v_u8mf4_tum(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, + size_t vl) { + return __riscv_vctz_tum(vm, vd, vs2, vl); +} + +vuint8mf2_t test_vctz_v_u8mf2_tum(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, + size_t vl) { + return __riscv_vctz_tum(vm, vd, vs2, vl); +} + +vuint8m1_t test_vctz_v_u8m1_tum(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, + size_t vl) { + return __riscv_vctz_tum(vm, vd, vs2, vl); +} + +vuint8m2_t test_vctz_v_u8m2_tum(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, + size_t vl) { + return __riscv_vctz_tum(vm, vd, vs2, vl); +} + +vuint8m4_t test_vctz_v_u8m4_tum(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, + size_t vl) { + return __riscv_vctz_tum(vm, vd, vs2, vl); +} + +vuint8m8_t test_vctz_v_u8m8_tum(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, + size_t vl) { + return __riscv_vctz_tum(vm, vd, vs2, vl); +} + +vuint16mf4_t test_vctz_v_u16mf4_tum(vbool64_t vm, vuint16mf4_t vd, + vuint16mf4_t vs2, size_t vl) { + return __riscv_vctz_tum(vm, vd, vs2, vl); +} + +vuint16mf2_t test_vctz_v_u16mf2_tum(vbool32_t vm, vuint16mf2_t vd, + vuint16mf2_t vs2, size_t vl) { + return __riscv_vctz_tum(vm, vd, vs2, vl); +} + +vuint16m1_t test_vctz_v_u16m1_tum(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, + size_t vl) { + return __riscv_vctz_tum(vm, vd, vs2, vl); +} + +vuint16m2_t test_vctz_v_u16m2_tum(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, + size_t vl) { + return __riscv_vctz_tum(vm, vd, vs2, vl); +} + +vuint16m4_t test_vctz_v_u16m4_tum(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, + size_t vl) { + return __riscv_vctz_tum(vm, vd, vs2, vl); +} + +vuint16m8_t test_vctz_v_u16m8_tum(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, + size_t vl) { + return __riscv_vctz_tum(vm, vd, vs2, vl); +} + +vuint32mf2_t test_vctz_v_u32mf2_tum(vbool64_t vm, vuint32mf2_t 
vd, + vuint32mf2_t vs2, size_t vl) { + return __riscv_vctz_tum(vm, vd, vs2, vl); +} + +vuint32m1_t test_vctz_v_u32m1_tum(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, + size_t vl) { + return __riscv_vctz_tum(vm, vd, vs2, vl); +} + +vuint32m2_t test_vctz_v_u32m2_tum(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, + size_t vl) { + return __riscv_vctz_tum(vm, vd, vs2, vl); +} + +vuint32m4_t test_vctz_v_u32m4_tum(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, + size_t vl) { + return __riscv_vctz_tum(vm, vd, vs2, vl); +} + +vuint32m8_t test_vctz_v_u32m8_tum(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, + size_t vl) { + return __riscv_vctz_tum(vm, vd, vs2, vl); +} + +vuint64m1_t test_vctz_v_u64m1_tum(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, + size_t vl) { + return __riscv_vctz_tum(vm, vd, vs2, vl); +} + +vuint64m2_t test_vctz_v_u64m2_tum(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, + size_t vl) { + return __riscv_vctz_tum(vm, vd, vs2, vl); +} + +vuint64m4_t test_vctz_v_u64m4_tum(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, + size_t vl) { + return __riscv_vctz_tum(vm, vd, vs2, vl); +} + +vuint64m8_t test_vctz_v_u64m8_tum(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, + size_t vl) { + return __riscv_vctz_tum(vm, vd, vs2, vl); +} + +vuint8mf8_t test_vctz_v_u8mf8_tumu(vbool64_t vm, vuint8mf8_t vd, + vuint8mf8_t vs2, size_t vl) { + return __riscv_vctz_tumu(vm, vd, vs2, vl); +} + +vuint8mf4_t test_vctz_v_u8mf4_tumu(vbool32_t vm, vuint8mf4_t vd, + vuint8mf4_t vs2, size_t vl) { + return __riscv_vctz_tumu(vm, vd, vs2, vl); +} + +vuint8mf2_t test_vctz_v_u8mf2_tumu(vbool16_t vm, vuint8mf2_t vd, + vuint8mf2_t vs2, size_t vl) { + return __riscv_vctz_tumu(vm, vd, vs2, vl); +} + +vuint8m1_t test_vctz_v_u8m1_tumu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, + size_t vl) { + return __riscv_vctz_tumu(vm, vd, vs2, vl); +} + +vuint8m2_t test_vctz_v_u8m2_tumu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, + size_t vl) { + return __riscv_vctz_tumu(vm, vd, vs2, vl); +} + +vuint8m4_t test_vctz_v_u8m4_tumu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, + size_t vl) { + return __riscv_vctz_tumu(vm, vd, vs2, vl); +} + +vuint8m8_t test_vctz_v_u8m8_tumu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, + size_t vl) { + return __riscv_vctz_tumu(vm, vd, vs2, vl); +} + +vuint16mf4_t test_vctz_v_u16mf4_tumu(vbool64_t vm, vuint16mf4_t vd, + vuint16mf4_t vs2, size_t vl) { + return __riscv_vctz_tumu(vm, vd, vs2, vl); +} + +vuint16mf2_t test_vctz_v_u16mf2_tumu(vbool32_t vm, vuint16mf2_t vd, + vuint16mf2_t vs2, size_t vl) { + return __riscv_vctz_tumu(vm, vd, vs2, vl); +} + +vuint16m1_t test_vctz_v_u16m1_tumu(vbool16_t vm, vuint16m1_t vd, + vuint16m1_t vs2, size_t vl) { + return __riscv_vctz_tumu(vm, vd, vs2, vl); +} + +vuint16m2_t test_vctz_v_u16m2_tumu(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, + size_t vl) { + return __riscv_vctz_tumu(vm, vd, vs2, vl); +} + +vuint16m4_t test_vctz_v_u16m4_tumu(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, + size_t vl) { + return __riscv_vctz_tumu(vm, vd, vs2, vl); +} + +vuint16m8_t test_vctz_v_u16m8_tumu(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, + size_t vl) { + return __riscv_vctz_tumu(vm, vd, vs2, vl); +} + +vuint32mf2_t test_vctz_v_u32mf2_tumu(vbool64_t vm, vuint32mf2_t vd, + vuint32mf2_t vs2, size_t vl) { + return __riscv_vctz_tumu(vm, vd, vs2, vl); +} + +vuint32m1_t test_vctz_v_u32m1_tumu(vbool32_t vm, vuint32m1_t vd, + vuint32m1_t vs2, size_t vl) { + return __riscv_vctz_tumu(vm, vd, vs2, vl); +} + +vuint32m2_t test_vctz_v_u32m2_tumu(vbool16_t vm, vuint32m2_t vd, + vuint32m2_t vs2, size_t 
vl) { + return __riscv_vctz_tumu(vm, vd, vs2, vl); +} + +vuint32m4_t test_vctz_v_u32m4_tumu(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, + size_t vl) { + return __riscv_vctz_tumu(vm, vd, vs2, vl); +} + +vuint32m8_t test_vctz_v_u32m8_tumu(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, + size_t vl) { + return __riscv_vctz_tumu(vm, vd, vs2, vl); +} + +vuint64m1_t test_vctz_v_u64m1_tumu(vbool64_t vm, vuint64m1_t vd, + vuint64m1_t vs2, size_t vl) { + return __riscv_vctz_tumu(vm, vd, vs2, vl); +} + +vuint64m2_t test_vctz_v_u64m2_tumu(vbool32_t vm, vuint64m2_t vd, + vuint64m2_t vs2, size_t vl) { + return __riscv_vctz_tumu(vm, vd, vs2, vl); +} + +vuint64m4_t test_vctz_v_u64m4_tumu(vbool16_t vm, vuint64m4_t vd, + vuint64m4_t vs2, size_t vl) { + return __riscv_vctz_tumu(vm, vd, vs2, vl); +} + +vuint64m8_t test_vctz_v_u64m8_tumu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, + size_t vl) { + return __riscv_vctz_tumu(vm, vd, vs2, vl); +} + +vuint8mf8_t test_vctz_v_u8mf8_mu(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, + size_t vl) { + return __riscv_vctz_mu(vm, vd, vs2, vl); +} + +vuint8mf4_t test_vctz_v_u8mf4_mu(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, + size_t vl) { + return __riscv_vctz_mu(vm, vd, vs2, vl); +} + +vuint8mf2_t test_vctz_v_u8mf2_mu(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, + size_t vl) { + return __riscv_vctz_mu(vm, vd, vs2, vl); +} + +vuint8m1_t test_vctz_v_u8m1_mu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, + size_t vl) { + return __riscv_vctz_mu(vm, vd, vs2, vl); +} + +vuint8m2_t test_vctz_v_u8m2_mu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, + size_t vl) { + return __riscv_vctz_mu(vm, vd, vs2, vl); +} + +vuint8m4_t test_vctz_v_u8m4_mu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, + size_t vl) { + return __riscv_vctz_mu(vm, vd, vs2, vl); +} + +vuint8m8_t test_vctz_v_u8m8_mu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, + size_t vl) { + return __riscv_vctz_mu(vm, vd, vs2, vl); +} + +vuint16mf4_t test_vctz_v_u16mf4_mu(vbool64_t vm, vuint16mf4_t vd, + vuint16mf4_t vs2, size_t vl) { + return __riscv_vctz_mu(vm, vd, vs2, vl); +} + +vuint16mf2_t test_vctz_v_u16mf2_mu(vbool32_t vm, vuint16mf2_t vd, + vuint16mf2_t vs2, size_t vl) { + return __riscv_vctz_mu(vm, vd, vs2, vl); +} + +vuint16m1_t test_vctz_v_u16m1_mu(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, + size_t vl) { + return __riscv_vctz_mu(vm, vd, vs2, vl); +} + +vuint16m2_t test_vctz_v_u16m2_mu(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, + size_t vl) { + return __riscv_vctz_mu(vm, vd, vs2, vl); +} + +vuint16m4_t test_vctz_v_u16m4_mu(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, + size_t vl) { + return __riscv_vctz_mu(vm, vd, vs2, vl); +} + +vuint16m8_t test_vctz_v_u16m8_mu(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, + size_t vl) { + return __riscv_vctz_mu(vm, vd, vs2, vl); +} + +vuint32mf2_t test_vctz_v_u32mf2_mu(vbool64_t vm, vuint32mf2_t vd, + vuint32mf2_t vs2, size_t vl) { + return __riscv_vctz_mu(vm, vd, vs2, vl); +} + +vuint32m1_t test_vctz_v_u32m1_mu(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, + size_t vl) { + return __riscv_vctz_mu(vm, vd, vs2, vl); +} + +vuint32m2_t test_vctz_v_u32m2_mu(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, + size_t vl) { + return __riscv_vctz_mu(vm, vd, vs2, vl); +} + +vuint32m4_t test_vctz_v_u32m4_mu(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, + size_t vl) { + return __riscv_vctz_mu(vm, vd, vs2, vl); +} + +vuint32m8_t test_vctz_v_u32m8_mu(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, + size_t vl) { + return __riscv_vctz_mu(vm, vd, vs2, vl); +} + +vuint64m1_t 
test_vctz_v_u64m1_mu(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, + size_t vl) { + return __riscv_vctz_mu(vm, vd, vs2, vl); +} + +vuint64m2_t test_vctz_v_u64m2_mu(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, + size_t vl) { + return __riscv_vctz_mu(vm, vd, vs2, vl); +} + +vuint64m4_t test_vctz_v_u64m4_mu(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, + size_t vl) { + return __riscv_vctz_mu(vm, vd, vs2, vl); +} + +vuint64m8_t test_vctz_v_u64m8_mu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, + size_t vl) { + return __riscv_vctz_mu(vm, vd, vs2, vl); +} diff --git a/auto-generated/vector-crypto/policy_funcs/overloaded-api-testing/vghsh.c b/auto-generated/vector-crypto/policy_funcs/overloaded-api-testing/vghsh.c new file mode 100644 index 000000000..a346e788f --- /dev/null +++ b/auto-generated/vector-crypto/policy_funcs/overloaded-api-testing/vghsh.c @@ -0,0 +1,27 @@ +#include +#include + +vuint32mf2_t test_vghsh_vv_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, + vuint32mf2_t vs1, size_t vl) { + return __riscv_vghsh_tu(vd, vs2, vs1, vl); +} + +vuint32m1_t test_vghsh_vv_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, + vuint32m1_t vs1, size_t vl) { + return __riscv_vghsh_tu(vd, vs2, vs1, vl); +} + +vuint32m2_t test_vghsh_vv_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, + vuint32m2_t vs1, size_t vl) { + return __riscv_vghsh_tu(vd, vs2, vs1, vl); +} + +vuint32m4_t test_vghsh_vv_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, + vuint32m4_t vs1, size_t vl) { + return __riscv_vghsh_tu(vd, vs2, vs1, vl); +} + +vuint32m8_t test_vghsh_vv_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, + vuint32m8_t vs1, size_t vl) { + return __riscv_vghsh_tu(vd, vs2, vs1, vl); +} diff --git a/auto-generated/vector-crypto/policy_funcs/overloaded-api-testing/vgmul.c b/auto-generated/vector-crypto/policy_funcs/overloaded-api-testing/vgmul.c new file mode 100644 index 000000000..fc282ac60 --- /dev/null +++ b/auto-generated/vector-crypto/policy_funcs/overloaded-api-testing/vgmul.c @@ -0,0 +1,23 @@ +#include +#include + +vuint32mf2_t test_vgmul_vv_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, + size_t vl) { + return __riscv_vgmul_tu(vd, vs2, vl); +} + +vuint32m1_t test_vgmul_vv_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vgmul_tu(vd, vs2, vl); +} + +vuint32m2_t test_vgmul_vv_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vgmul_tu(vd, vs2, vl); +} + +vuint32m4_t test_vgmul_vv_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vgmul_tu(vd, vs2, vl); +} + +vuint32m8_t test_vgmul_vv_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { + return __riscv_vgmul_tu(vd, vs2, vl); +} diff --git a/auto-generated/vector-crypto/policy_funcs/overloaded-api-testing/vrev8.c b/auto-generated/vector-crypto/policy_funcs/overloaded-api-testing/vrev8.c new file mode 100644 index 000000000..d56e3555e --- /dev/null +++ b/auto-generated/vector-crypto/policy_funcs/overloaded-api-testing/vrev8.c @@ -0,0 +1,423 @@ +#include +#include + +vuint8mf8_t test_vrev8_v_u8mf8_tu(vuint8mf8_t vd, vuint8mf8_t vs2, size_t vl) { + return __riscv_vrev8_tu(vd, vs2, vl); +} + +vuint8mf4_t test_vrev8_v_u8mf4_tu(vuint8mf4_t vd, vuint8mf4_t vs2, size_t vl) { + return __riscv_vrev8_tu(vd, vs2, vl); +} + +vuint8mf2_t test_vrev8_v_u8mf2_tu(vuint8mf2_t vd, vuint8mf2_t vs2, size_t vl) { + return __riscv_vrev8_tu(vd, vs2, vl); +} + +vuint8m1_t test_vrev8_v_u8m1_tu(vuint8m1_t vd, vuint8m1_t vs2, size_t vl) { + return __riscv_vrev8_tu(vd, vs2, vl); +} + +vuint8m2_t test_vrev8_v_u8m2_tu(vuint8m2_t vd, vuint8m2_t vs2, size_t 
vl) { + return __riscv_vrev8_tu(vd, vs2, vl); +} + +vuint8m4_t test_vrev8_v_u8m4_tu(vuint8m4_t vd, vuint8m4_t vs2, size_t vl) { + return __riscv_vrev8_tu(vd, vs2, vl); +} + +vuint8m8_t test_vrev8_v_u8m8_tu(vuint8m8_t vd, vuint8m8_t vs2, size_t vl) { + return __riscv_vrev8_tu(vd, vs2, vl); +} + +vuint16mf4_t test_vrev8_v_u16mf4_tu(vuint16mf4_t vd, vuint16mf4_t vs2, + size_t vl) { + return __riscv_vrev8_tu(vd, vs2, vl); +} + +vuint16mf2_t test_vrev8_v_u16mf2_tu(vuint16mf2_t vd, vuint16mf2_t vs2, + size_t vl) { + return __riscv_vrev8_tu(vd, vs2, vl); +} + +vuint16m1_t test_vrev8_v_u16m1_tu(vuint16m1_t vd, vuint16m1_t vs2, size_t vl) { + return __riscv_vrev8_tu(vd, vs2, vl); +} + +vuint16m2_t test_vrev8_v_u16m2_tu(vuint16m2_t vd, vuint16m2_t vs2, size_t vl) { + return __riscv_vrev8_tu(vd, vs2, vl); +} + +vuint16m4_t test_vrev8_v_u16m4_tu(vuint16m4_t vd, vuint16m4_t vs2, size_t vl) { + return __riscv_vrev8_tu(vd, vs2, vl); +} + +vuint16m8_t test_vrev8_v_u16m8_tu(vuint16m8_t vd, vuint16m8_t vs2, size_t vl) { + return __riscv_vrev8_tu(vd, vs2, vl); +} + +vuint32mf2_t test_vrev8_v_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, + size_t vl) { + return __riscv_vrev8_tu(vd, vs2, vl); +} + +vuint32m1_t test_vrev8_v_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vrev8_tu(vd, vs2, vl); +} + +vuint32m2_t test_vrev8_v_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vrev8_tu(vd, vs2, vl); +} + +vuint32m4_t test_vrev8_v_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vrev8_tu(vd, vs2, vl); +} + +vuint32m8_t test_vrev8_v_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { + return __riscv_vrev8_tu(vd, vs2, vl); +} + +vuint64m1_t test_vrev8_v_u64m1_tu(vuint64m1_t vd, vuint64m1_t vs2, size_t vl) { + return __riscv_vrev8_tu(vd, vs2, vl); +} + +vuint64m2_t test_vrev8_v_u64m2_tu(vuint64m2_t vd, vuint64m2_t vs2, size_t vl) { + return __riscv_vrev8_tu(vd, vs2, vl); +} + +vuint64m4_t test_vrev8_v_u64m4_tu(vuint64m4_t vd, vuint64m4_t vs2, size_t vl) { + return __riscv_vrev8_tu(vd, vs2, vl); +} + +vuint64m8_t test_vrev8_v_u64m8_tu(vuint64m8_t vd, vuint64m8_t vs2, size_t vl) { + return __riscv_vrev8_tu(vd, vs2, vl); +} + +vuint8mf8_t test_vrev8_v_u8mf8_tum(vbool64_t vm, vuint8mf8_t vd, + vuint8mf8_t vs2, size_t vl) { + return __riscv_vrev8_tum(vm, vd, vs2, vl); +} + +vuint8mf4_t test_vrev8_v_u8mf4_tum(vbool32_t vm, vuint8mf4_t vd, + vuint8mf4_t vs2, size_t vl) { + return __riscv_vrev8_tum(vm, vd, vs2, vl); +} + +vuint8mf2_t test_vrev8_v_u8mf2_tum(vbool16_t vm, vuint8mf2_t vd, + vuint8mf2_t vs2, size_t vl) { + return __riscv_vrev8_tum(vm, vd, vs2, vl); +} + +vuint8m1_t test_vrev8_v_u8m1_tum(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, + size_t vl) { + return __riscv_vrev8_tum(vm, vd, vs2, vl); +} + +vuint8m2_t test_vrev8_v_u8m2_tum(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, + size_t vl) { + return __riscv_vrev8_tum(vm, vd, vs2, vl); +} + +vuint8m4_t test_vrev8_v_u8m4_tum(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, + size_t vl) { + return __riscv_vrev8_tum(vm, vd, vs2, vl); +} + +vuint8m8_t test_vrev8_v_u8m8_tum(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, + size_t vl) { + return __riscv_vrev8_tum(vm, vd, vs2, vl); +} + +vuint16mf4_t test_vrev8_v_u16mf4_tum(vbool64_t vm, vuint16mf4_t vd, + vuint16mf4_t vs2, size_t vl) { + return __riscv_vrev8_tum(vm, vd, vs2, vl); +} + +vuint16mf2_t test_vrev8_v_u16mf2_tum(vbool32_t vm, vuint16mf2_t vd, + vuint16mf2_t vs2, size_t vl) { + return __riscv_vrev8_tum(vm, vd, vs2, vl); +} + +vuint16m1_t 
test_vrev8_v_u16m1_tum(vbool16_t vm, vuint16m1_t vd, + vuint16m1_t vs2, size_t vl) { + return __riscv_vrev8_tum(vm, vd, vs2, vl); +} + +vuint16m2_t test_vrev8_v_u16m2_tum(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, + size_t vl) { + return __riscv_vrev8_tum(vm, vd, vs2, vl); +} + +vuint16m4_t test_vrev8_v_u16m4_tum(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, + size_t vl) { + return __riscv_vrev8_tum(vm, vd, vs2, vl); +} + +vuint16m8_t test_vrev8_v_u16m8_tum(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, + size_t vl) { + return __riscv_vrev8_tum(vm, vd, vs2, vl); +} + +vuint32mf2_t test_vrev8_v_u32mf2_tum(vbool64_t vm, vuint32mf2_t vd, + vuint32mf2_t vs2, size_t vl) { + return __riscv_vrev8_tum(vm, vd, vs2, vl); +} + +vuint32m1_t test_vrev8_v_u32m1_tum(vbool32_t vm, vuint32m1_t vd, + vuint32m1_t vs2, size_t vl) { + return __riscv_vrev8_tum(vm, vd, vs2, vl); +} + +vuint32m2_t test_vrev8_v_u32m2_tum(vbool16_t vm, vuint32m2_t vd, + vuint32m2_t vs2, size_t vl) { + return __riscv_vrev8_tum(vm, vd, vs2, vl); +} + +vuint32m4_t test_vrev8_v_u32m4_tum(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, + size_t vl) { + return __riscv_vrev8_tum(vm, vd, vs2, vl); +} + +vuint32m8_t test_vrev8_v_u32m8_tum(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, + size_t vl) { + return __riscv_vrev8_tum(vm, vd, vs2, vl); +} + +vuint64m1_t test_vrev8_v_u64m1_tum(vbool64_t vm, vuint64m1_t vd, + vuint64m1_t vs2, size_t vl) { + return __riscv_vrev8_tum(vm, vd, vs2, vl); +} + +vuint64m2_t test_vrev8_v_u64m2_tum(vbool32_t vm, vuint64m2_t vd, + vuint64m2_t vs2, size_t vl) { + return __riscv_vrev8_tum(vm, vd, vs2, vl); +} + +vuint64m4_t test_vrev8_v_u64m4_tum(vbool16_t vm, vuint64m4_t vd, + vuint64m4_t vs2, size_t vl) { + return __riscv_vrev8_tum(vm, vd, vs2, vl); +} + +vuint64m8_t test_vrev8_v_u64m8_tum(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, + size_t vl) { + return __riscv_vrev8_tum(vm, vd, vs2, vl); +} + +vuint8mf8_t test_vrev8_v_u8mf8_tumu(vbool64_t vm, vuint8mf8_t vd, + vuint8mf8_t vs2, size_t vl) { + return __riscv_vrev8_tumu(vm, vd, vs2, vl); +} + +vuint8mf4_t test_vrev8_v_u8mf4_tumu(vbool32_t vm, vuint8mf4_t vd, + vuint8mf4_t vs2, size_t vl) { + return __riscv_vrev8_tumu(vm, vd, vs2, vl); +} + +vuint8mf2_t test_vrev8_v_u8mf2_tumu(vbool16_t vm, vuint8mf2_t vd, + vuint8mf2_t vs2, size_t vl) { + return __riscv_vrev8_tumu(vm, vd, vs2, vl); +} + +vuint8m1_t test_vrev8_v_u8m1_tumu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, + size_t vl) { + return __riscv_vrev8_tumu(vm, vd, vs2, vl); +} + +vuint8m2_t test_vrev8_v_u8m2_tumu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, + size_t vl) { + return __riscv_vrev8_tumu(vm, vd, vs2, vl); +} + +vuint8m4_t test_vrev8_v_u8m4_tumu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, + size_t vl) { + return __riscv_vrev8_tumu(vm, vd, vs2, vl); +} + +vuint8m8_t test_vrev8_v_u8m8_tumu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, + size_t vl) { + return __riscv_vrev8_tumu(vm, vd, vs2, vl); +} + +vuint16mf4_t test_vrev8_v_u16mf4_tumu(vbool64_t vm, vuint16mf4_t vd, + vuint16mf4_t vs2, size_t vl) { + return __riscv_vrev8_tumu(vm, vd, vs2, vl); +} + +vuint16mf2_t test_vrev8_v_u16mf2_tumu(vbool32_t vm, vuint16mf2_t vd, + vuint16mf2_t vs2, size_t vl) { + return __riscv_vrev8_tumu(vm, vd, vs2, vl); +} + +vuint16m1_t test_vrev8_v_u16m1_tumu(vbool16_t vm, vuint16m1_t vd, + vuint16m1_t vs2, size_t vl) { + return __riscv_vrev8_tumu(vm, vd, vs2, vl); +} + +vuint16m2_t test_vrev8_v_u16m2_tumu(vbool8_t vm, vuint16m2_t vd, + vuint16m2_t vs2, size_t vl) { + return __riscv_vrev8_tumu(vm, vd, vs2, vl); +} + 
+vuint16m4_t test_vrev8_v_u16m4_tumu(vbool4_t vm, vuint16m4_t vd, + vuint16m4_t vs2, size_t vl) { + return __riscv_vrev8_tumu(vm, vd, vs2, vl); +} + +vuint16m8_t test_vrev8_v_u16m8_tumu(vbool2_t vm, vuint16m8_t vd, + vuint16m8_t vs2, size_t vl) { + return __riscv_vrev8_tumu(vm, vd, vs2, vl); +} + +vuint32mf2_t test_vrev8_v_u32mf2_tumu(vbool64_t vm, vuint32mf2_t vd, + vuint32mf2_t vs2, size_t vl) { + return __riscv_vrev8_tumu(vm, vd, vs2, vl); +} + +vuint32m1_t test_vrev8_v_u32m1_tumu(vbool32_t vm, vuint32m1_t vd, + vuint32m1_t vs2, size_t vl) { + return __riscv_vrev8_tumu(vm, vd, vs2, vl); +} + +vuint32m2_t test_vrev8_v_u32m2_tumu(vbool16_t vm, vuint32m2_t vd, + vuint32m2_t vs2, size_t vl) { + return __riscv_vrev8_tumu(vm, vd, vs2, vl); +} + +vuint32m4_t test_vrev8_v_u32m4_tumu(vbool8_t vm, vuint32m4_t vd, + vuint32m4_t vs2, size_t vl) { + return __riscv_vrev8_tumu(vm, vd, vs2, vl); +} + +vuint32m8_t test_vrev8_v_u32m8_tumu(vbool4_t vm, vuint32m8_t vd, + vuint32m8_t vs2, size_t vl) { + return __riscv_vrev8_tumu(vm, vd, vs2, vl); +} + +vuint64m1_t test_vrev8_v_u64m1_tumu(vbool64_t vm, vuint64m1_t vd, + vuint64m1_t vs2, size_t vl) { + return __riscv_vrev8_tumu(vm, vd, vs2, vl); +} + +vuint64m2_t test_vrev8_v_u64m2_tumu(vbool32_t vm, vuint64m2_t vd, + vuint64m2_t vs2, size_t vl) { + return __riscv_vrev8_tumu(vm, vd, vs2, vl); +} + +vuint64m4_t test_vrev8_v_u64m4_tumu(vbool16_t vm, vuint64m4_t vd, + vuint64m4_t vs2, size_t vl) { + return __riscv_vrev8_tumu(vm, vd, vs2, vl); +} + +vuint64m8_t test_vrev8_v_u64m8_tumu(vbool8_t vm, vuint64m8_t vd, + vuint64m8_t vs2, size_t vl) { + return __riscv_vrev8_tumu(vm, vd, vs2, vl); +} + +vuint8mf8_t test_vrev8_v_u8mf8_mu(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, + size_t vl) { + return __riscv_vrev8_mu(vm, vd, vs2, vl); +} + +vuint8mf4_t test_vrev8_v_u8mf4_mu(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, + size_t vl) { + return __riscv_vrev8_mu(vm, vd, vs2, vl); +} + +vuint8mf2_t test_vrev8_v_u8mf2_mu(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, + size_t vl) { + return __riscv_vrev8_mu(vm, vd, vs2, vl); +} + +vuint8m1_t test_vrev8_v_u8m1_mu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, + size_t vl) { + return __riscv_vrev8_mu(vm, vd, vs2, vl); +} + +vuint8m2_t test_vrev8_v_u8m2_mu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, + size_t vl) { + return __riscv_vrev8_mu(vm, vd, vs2, vl); +} + +vuint8m4_t test_vrev8_v_u8m4_mu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, + size_t vl) { + return __riscv_vrev8_mu(vm, vd, vs2, vl); +} + +vuint8m8_t test_vrev8_v_u8m8_mu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, + size_t vl) { + return __riscv_vrev8_mu(vm, vd, vs2, vl); +} + +vuint16mf4_t test_vrev8_v_u16mf4_mu(vbool64_t vm, vuint16mf4_t vd, + vuint16mf4_t vs2, size_t vl) { + return __riscv_vrev8_mu(vm, vd, vs2, vl); +} + +vuint16mf2_t test_vrev8_v_u16mf2_mu(vbool32_t vm, vuint16mf2_t vd, + vuint16mf2_t vs2, size_t vl) { + return __riscv_vrev8_mu(vm, vd, vs2, vl); +} + +vuint16m1_t test_vrev8_v_u16m1_mu(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, + size_t vl) { + return __riscv_vrev8_mu(vm, vd, vs2, vl); +} + +vuint16m2_t test_vrev8_v_u16m2_mu(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, + size_t vl) { + return __riscv_vrev8_mu(vm, vd, vs2, vl); +} + +vuint16m4_t test_vrev8_v_u16m4_mu(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, + size_t vl) { + return __riscv_vrev8_mu(vm, vd, vs2, vl); +} + +vuint16m8_t test_vrev8_v_u16m8_mu(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, + size_t vl) { + return __riscv_vrev8_mu(vm, vd, vs2, vl); +} + +vuint32mf2_t 
test_vrev8_v_u32mf2_mu(vbool64_t vm, vuint32mf2_t vd, + vuint32mf2_t vs2, size_t vl) { + return __riscv_vrev8_mu(vm, vd, vs2, vl); +} + +vuint32m1_t test_vrev8_v_u32m1_mu(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, + size_t vl) { + return __riscv_vrev8_mu(vm, vd, vs2, vl); +} + +vuint32m2_t test_vrev8_v_u32m2_mu(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, + size_t vl) { + return __riscv_vrev8_mu(vm, vd, vs2, vl); +} + +vuint32m4_t test_vrev8_v_u32m4_mu(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, + size_t vl) { + return __riscv_vrev8_mu(vm, vd, vs2, vl); +} + +vuint32m8_t test_vrev8_v_u32m8_mu(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, + size_t vl) { + return __riscv_vrev8_mu(vm, vd, vs2, vl); +} + +vuint64m1_t test_vrev8_v_u64m1_mu(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, + size_t vl) { + return __riscv_vrev8_mu(vm, vd, vs2, vl); +} + +vuint64m2_t test_vrev8_v_u64m2_mu(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, + size_t vl) { + return __riscv_vrev8_mu(vm, vd, vs2, vl); +} + +vuint64m4_t test_vrev8_v_u64m4_mu(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, + size_t vl) { + return __riscv_vrev8_mu(vm, vd, vs2, vl); +} + +vuint64m8_t test_vrev8_v_u64m8_mu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, + size_t vl) { + return __riscv_vrev8_mu(vm, vd, vs2, vl); +} diff --git a/auto-generated/vector-crypto/policy_funcs/overloaded-api-testing/vrol.c b/auto-generated/vector-crypto/policy_funcs/overloaded-api-testing/vrol.c new file mode 100644 index 000000000..0f9405dcd --- /dev/null +++ b/auto-generated/vector-crypto/policy_funcs/overloaded-api-testing/vrol.c @@ -0,0 +1,915 @@ +#include +#include + +vuint8mf8_t test_vrol_vv_u8mf8_tu(vuint8mf8_t vd, vuint8mf8_t vs2, + vuint8mf8_t vs1, size_t vl) { + return __riscv_vrol_tu(vd, vs2, vs1, vl); +} + +vuint8mf8_t test_vrol_vx_u8mf8_tu(vuint8mf8_t vd, vuint8mf8_t vs2, size_t rs1, + size_t vl) { + return __riscv_vrol_tu(vd, vs2, rs1, vl); +} + +vuint8mf4_t test_vrol_vv_u8mf4_tu(vuint8mf4_t vd, vuint8mf4_t vs2, + vuint8mf4_t vs1, size_t vl) { + return __riscv_vrol_tu(vd, vs2, vs1, vl); +} + +vuint8mf4_t test_vrol_vx_u8mf4_tu(vuint8mf4_t vd, vuint8mf4_t vs2, size_t rs1, + size_t vl) { + return __riscv_vrol_tu(vd, vs2, rs1, vl); +} + +vuint8mf2_t test_vrol_vv_u8mf2_tu(vuint8mf2_t vd, vuint8mf2_t vs2, + vuint8mf2_t vs1, size_t vl) { + return __riscv_vrol_tu(vd, vs2, vs1, vl); +} + +vuint8mf2_t test_vrol_vx_u8mf2_tu(vuint8mf2_t vd, vuint8mf2_t vs2, size_t rs1, + size_t vl) { + return __riscv_vrol_tu(vd, vs2, rs1, vl); +} + +vuint8m1_t test_vrol_vv_u8m1_tu(vuint8m1_t vd, vuint8m1_t vs2, vuint8m1_t vs1, + size_t vl) { + return __riscv_vrol_tu(vd, vs2, vs1, vl); +} + +vuint8m1_t test_vrol_vx_u8m1_tu(vuint8m1_t vd, vuint8m1_t vs2, size_t rs1, + size_t vl) { + return __riscv_vrol_tu(vd, vs2, rs1, vl); +} + +vuint8m2_t test_vrol_vv_u8m2_tu(vuint8m2_t vd, vuint8m2_t vs2, vuint8m2_t vs1, + size_t vl) { + return __riscv_vrol_tu(vd, vs2, vs1, vl); +} + +vuint8m2_t test_vrol_vx_u8m2_tu(vuint8m2_t vd, vuint8m2_t vs2, size_t rs1, + size_t vl) { + return __riscv_vrol_tu(vd, vs2, rs1, vl); +} + +vuint8m4_t test_vrol_vv_u8m4_tu(vuint8m4_t vd, vuint8m4_t vs2, vuint8m4_t vs1, + size_t vl) { + return __riscv_vrol_tu(vd, vs2, vs1, vl); +} + +vuint8m4_t test_vrol_vx_u8m4_tu(vuint8m4_t vd, vuint8m4_t vs2, size_t rs1, + size_t vl) { + return __riscv_vrol_tu(vd, vs2, rs1, vl); +} + +vuint8m8_t test_vrol_vv_u8m8_tu(vuint8m8_t vd, vuint8m8_t vs2, vuint8m8_t vs1, + size_t vl) { + return __riscv_vrol_tu(vd, vs2, vs1, vl); +} + +vuint8m8_t 
test_vrol_vx_u8m8_tu(vuint8m8_t vd, vuint8m8_t vs2, size_t rs1, + size_t vl) { + return __riscv_vrol_tu(vd, vs2, rs1, vl); +} + +vuint16mf4_t test_vrol_vv_u16mf4_tu(vuint16mf4_t vd, vuint16mf4_t vs2, + vuint16mf4_t vs1, size_t vl) { + return __riscv_vrol_tu(vd, vs2, vs1, vl); +} + +vuint16mf4_t test_vrol_vx_u16mf4_tu(vuint16mf4_t vd, vuint16mf4_t vs2, + size_t rs1, size_t vl) { + return __riscv_vrol_tu(vd, vs2, rs1, vl); +} + +vuint16mf2_t test_vrol_vv_u16mf2_tu(vuint16mf2_t vd, vuint16mf2_t vs2, + vuint16mf2_t vs1, size_t vl) { + return __riscv_vrol_tu(vd, vs2, vs1, vl); +} + +vuint16mf2_t test_vrol_vx_u16mf2_tu(vuint16mf2_t vd, vuint16mf2_t vs2, + size_t rs1, size_t vl) { + return __riscv_vrol_tu(vd, vs2, rs1, vl); +} + +vuint16m1_t test_vrol_vv_u16m1_tu(vuint16m1_t vd, vuint16m1_t vs2, + vuint16m1_t vs1, size_t vl) { + return __riscv_vrol_tu(vd, vs2, vs1, vl); +} + +vuint16m1_t test_vrol_vx_u16m1_tu(vuint16m1_t vd, vuint16m1_t vs2, size_t rs1, + size_t vl) { + return __riscv_vrol_tu(vd, vs2, rs1, vl); +} + +vuint16m2_t test_vrol_vv_u16m2_tu(vuint16m2_t vd, vuint16m2_t vs2, + vuint16m2_t vs1, size_t vl) { + return __riscv_vrol_tu(vd, vs2, vs1, vl); +} + +vuint16m2_t test_vrol_vx_u16m2_tu(vuint16m2_t vd, vuint16m2_t vs2, size_t rs1, + size_t vl) { + return __riscv_vrol_tu(vd, vs2, rs1, vl); +} + +vuint16m4_t test_vrol_vv_u16m4_tu(vuint16m4_t vd, vuint16m4_t vs2, + vuint16m4_t vs1, size_t vl) { + return __riscv_vrol_tu(vd, vs2, vs1, vl); +} + +vuint16m4_t test_vrol_vx_u16m4_tu(vuint16m4_t vd, vuint16m4_t vs2, size_t rs1, + size_t vl) { + return __riscv_vrol_tu(vd, vs2, rs1, vl); +} + +vuint16m8_t test_vrol_vv_u16m8_tu(vuint16m8_t vd, vuint16m8_t vs2, + vuint16m8_t vs1, size_t vl) { + return __riscv_vrol_tu(vd, vs2, vs1, vl); +} + +vuint16m8_t test_vrol_vx_u16m8_tu(vuint16m8_t vd, vuint16m8_t vs2, size_t rs1, + size_t vl) { + return __riscv_vrol_tu(vd, vs2, rs1, vl); +} + +vuint32mf2_t test_vrol_vv_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, + vuint32mf2_t vs1, size_t vl) { + return __riscv_vrol_tu(vd, vs2, vs1, vl); +} + +vuint32mf2_t test_vrol_vx_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, + size_t rs1, size_t vl) { + return __riscv_vrol_tu(vd, vs2, rs1, vl); +} + +vuint32m1_t test_vrol_vv_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, + vuint32m1_t vs1, size_t vl) { + return __riscv_vrol_tu(vd, vs2, vs1, vl); +} + +vuint32m1_t test_vrol_vx_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, size_t rs1, + size_t vl) { + return __riscv_vrol_tu(vd, vs2, rs1, vl); +} + +vuint32m2_t test_vrol_vv_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, + vuint32m2_t vs1, size_t vl) { + return __riscv_vrol_tu(vd, vs2, vs1, vl); +} + +vuint32m2_t test_vrol_vx_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, size_t rs1, + size_t vl) { + return __riscv_vrol_tu(vd, vs2, rs1, vl); +} + +vuint32m4_t test_vrol_vv_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, + vuint32m4_t vs1, size_t vl) { + return __riscv_vrol_tu(vd, vs2, vs1, vl); +} + +vuint32m4_t test_vrol_vx_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, size_t rs1, + size_t vl) { + return __riscv_vrol_tu(vd, vs2, rs1, vl); +} + +vuint32m8_t test_vrol_vv_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, + vuint32m8_t vs1, size_t vl) { + return __riscv_vrol_tu(vd, vs2, vs1, vl); +} + +vuint32m8_t test_vrol_vx_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, size_t rs1, + size_t vl) { + return __riscv_vrol_tu(vd, vs2, rs1, vl); +} + +vuint64m1_t test_vrol_vv_u64m1_tu(vuint64m1_t vd, vuint64m1_t vs2, + vuint64m1_t vs1, size_t vl) { + return __riscv_vrol_tu(vd, vs2, vs1, vl); +} + +vuint64m1_t 
test_vrol_vx_u64m1_tu(vuint64m1_t vd, vuint64m1_t vs2, size_t rs1, + size_t vl) { + return __riscv_vrol_tu(vd, vs2, rs1, vl); +} + +vuint64m2_t test_vrol_vv_u64m2_tu(vuint64m2_t vd, vuint64m2_t vs2, + vuint64m2_t vs1, size_t vl) { + return __riscv_vrol_tu(vd, vs2, vs1, vl); +} + +vuint64m2_t test_vrol_vx_u64m2_tu(vuint64m2_t vd, vuint64m2_t vs2, size_t rs1, + size_t vl) { + return __riscv_vrol_tu(vd, vs2, rs1, vl); +} + +vuint64m4_t test_vrol_vv_u64m4_tu(vuint64m4_t vd, vuint64m4_t vs2, + vuint64m4_t vs1, size_t vl) { + return __riscv_vrol_tu(vd, vs2, vs1, vl); +} + +vuint64m4_t test_vrol_vx_u64m4_tu(vuint64m4_t vd, vuint64m4_t vs2, size_t rs1, + size_t vl) { + return __riscv_vrol_tu(vd, vs2, rs1, vl); +} + +vuint64m8_t test_vrol_vv_u64m8_tu(vuint64m8_t vd, vuint64m8_t vs2, + vuint64m8_t vs1, size_t vl) { + return __riscv_vrol_tu(vd, vs2, vs1, vl); +} + +vuint64m8_t test_vrol_vx_u64m8_tu(vuint64m8_t vd, vuint64m8_t vs2, size_t rs1, + size_t vl) { + return __riscv_vrol_tu(vd, vs2, rs1, vl); +} + +vuint8mf8_t test_vrol_vv_u8mf8_tum(vbool64_t vm, vuint8mf8_t vd, + vuint8mf8_t vs2, vuint8mf8_t vs1, + size_t vl) { + return __riscv_vrol_tum(vm, vd, vs2, vs1, vl); +} + +vuint8mf8_t test_vrol_vx_u8mf8_tum(vbool64_t vm, vuint8mf8_t vd, + vuint8mf8_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_tum(vm, vd, vs2, rs1, vl); +} + +vuint8mf4_t test_vrol_vv_u8mf4_tum(vbool32_t vm, vuint8mf4_t vd, + vuint8mf4_t vs2, vuint8mf4_t vs1, + size_t vl) { + return __riscv_vrol_tum(vm, vd, vs2, vs1, vl); +} + +vuint8mf4_t test_vrol_vx_u8mf4_tum(vbool32_t vm, vuint8mf4_t vd, + vuint8mf4_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_tum(vm, vd, vs2, rs1, vl); +} + +vuint8mf2_t test_vrol_vv_u8mf2_tum(vbool16_t vm, vuint8mf2_t vd, + vuint8mf2_t vs2, vuint8mf2_t vs1, + size_t vl) { + return __riscv_vrol_tum(vm, vd, vs2, vs1, vl); +} + +vuint8mf2_t test_vrol_vx_u8mf2_tum(vbool16_t vm, vuint8mf2_t vd, + vuint8mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_tum(vm, vd, vs2, rs1, vl); +} + +vuint8m1_t test_vrol_vv_u8m1_tum(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, + vuint8m1_t vs1, size_t vl) { + return __riscv_vrol_tum(vm, vd, vs2, vs1, vl); +} + +vuint8m1_t test_vrol_vx_u8m1_tum(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, + size_t rs1, size_t vl) { + return __riscv_vrol_tum(vm, vd, vs2, rs1, vl); +} + +vuint8m2_t test_vrol_vv_u8m2_tum(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, + vuint8m2_t vs1, size_t vl) { + return __riscv_vrol_tum(vm, vd, vs2, vs1, vl); +} + +vuint8m2_t test_vrol_vx_u8m2_tum(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, + size_t rs1, size_t vl) { + return __riscv_vrol_tum(vm, vd, vs2, rs1, vl); +} + +vuint8m4_t test_vrol_vv_u8m4_tum(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, + vuint8m4_t vs1, size_t vl) { + return __riscv_vrol_tum(vm, vd, vs2, vs1, vl); +} + +vuint8m4_t test_vrol_vx_u8m4_tum(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, + size_t rs1, size_t vl) { + return __riscv_vrol_tum(vm, vd, vs2, rs1, vl); +} + +vuint8m8_t test_vrol_vv_u8m8_tum(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, + vuint8m8_t vs1, size_t vl) { + return __riscv_vrol_tum(vm, vd, vs2, vs1, vl); +} + +vuint8m8_t test_vrol_vx_u8m8_tum(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, + size_t rs1, size_t vl) { + return __riscv_vrol_tum(vm, vd, vs2, rs1, vl); +} + +vuint16mf4_t test_vrol_vv_u16mf4_tum(vbool64_t vm, vuint16mf4_t vd, + vuint16mf4_t vs2, vuint16mf4_t vs1, + size_t vl) { + return __riscv_vrol_tum(vm, vd, vs2, vs1, vl); +} + +vuint16mf4_t test_vrol_vx_u16mf4_tum(vbool64_t vm, vuint16mf4_t vd, + 
vuint16mf4_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_tum(vm, vd, vs2, rs1, vl); +} + +vuint16mf2_t test_vrol_vv_u16mf2_tum(vbool32_t vm, vuint16mf2_t vd, + vuint16mf2_t vs2, vuint16mf2_t vs1, + size_t vl) { + return __riscv_vrol_tum(vm, vd, vs2, vs1, vl); +} + +vuint16mf2_t test_vrol_vx_u16mf2_tum(vbool32_t vm, vuint16mf2_t vd, + vuint16mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_tum(vm, vd, vs2, rs1, vl); +} + +vuint16m1_t test_vrol_vv_u16m1_tum(vbool16_t vm, vuint16m1_t vd, + vuint16m1_t vs2, vuint16m1_t vs1, + size_t vl) { + return __riscv_vrol_tum(vm, vd, vs2, vs1, vl); +} + +vuint16m1_t test_vrol_vx_u16m1_tum(vbool16_t vm, vuint16m1_t vd, + vuint16m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_tum(vm, vd, vs2, rs1, vl); +} + +vuint16m2_t test_vrol_vv_u16m2_tum(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, + vuint16m2_t vs1, size_t vl) { + return __riscv_vrol_tum(vm, vd, vs2, vs1, vl); +} + +vuint16m2_t test_vrol_vx_u16m2_tum(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, + size_t rs1, size_t vl) { + return __riscv_vrol_tum(vm, vd, vs2, rs1, vl); +} + +vuint16m4_t test_vrol_vv_u16m4_tum(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, + vuint16m4_t vs1, size_t vl) { + return __riscv_vrol_tum(vm, vd, vs2, vs1, vl); +} + +vuint16m4_t test_vrol_vx_u16m4_tum(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, + size_t rs1, size_t vl) { + return __riscv_vrol_tum(vm, vd, vs2, rs1, vl); +} + +vuint16m8_t test_vrol_vv_u16m8_tum(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, + vuint16m8_t vs1, size_t vl) { + return __riscv_vrol_tum(vm, vd, vs2, vs1, vl); +} + +vuint16m8_t test_vrol_vx_u16m8_tum(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, + size_t rs1, size_t vl) { + return __riscv_vrol_tum(vm, vd, vs2, rs1, vl); +} + +vuint32mf2_t test_vrol_vv_u32mf2_tum(vbool64_t vm, vuint32mf2_t vd, + vuint32mf2_t vs2, vuint32mf2_t vs1, + size_t vl) { + return __riscv_vrol_tum(vm, vd, vs2, vs1, vl); +} + +vuint32mf2_t test_vrol_vx_u32mf2_tum(vbool64_t vm, vuint32mf2_t vd, + vuint32mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_tum(vm, vd, vs2, rs1, vl); +} + +vuint32m1_t test_vrol_vv_u32m1_tum(vbool32_t vm, vuint32m1_t vd, + vuint32m1_t vs2, vuint32m1_t vs1, + size_t vl) { + return __riscv_vrol_tum(vm, vd, vs2, vs1, vl); +} + +vuint32m1_t test_vrol_vx_u32m1_tum(vbool32_t vm, vuint32m1_t vd, + vuint32m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_tum(vm, vd, vs2, rs1, vl); +} + +vuint32m2_t test_vrol_vv_u32m2_tum(vbool16_t vm, vuint32m2_t vd, + vuint32m2_t vs2, vuint32m2_t vs1, + size_t vl) { + return __riscv_vrol_tum(vm, vd, vs2, vs1, vl); +} + +vuint32m2_t test_vrol_vx_u32m2_tum(vbool16_t vm, vuint32m2_t vd, + vuint32m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_tum(vm, vd, vs2, rs1, vl); +} + +vuint32m4_t test_vrol_vv_u32m4_tum(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, + vuint32m4_t vs1, size_t vl) { + return __riscv_vrol_tum(vm, vd, vs2, vs1, vl); +} + +vuint32m4_t test_vrol_vx_u32m4_tum(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, + size_t rs1, size_t vl) { + return __riscv_vrol_tum(vm, vd, vs2, rs1, vl); +} + +vuint32m8_t test_vrol_vv_u32m8_tum(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, + vuint32m8_t vs1, size_t vl) { + return __riscv_vrol_tum(vm, vd, vs2, vs1, vl); +} + +vuint32m8_t test_vrol_vx_u32m8_tum(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, + size_t rs1, size_t vl) { + return __riscv_vrol_tum(vm, vd, vs2, rs1, vl); +} + +vuint64m1_t test_vrol_vv_u64m1_tum(vbool64_t vm, vuint64m1_t vd, + vuint64m1_t vs2, vuint64m1_t vs1, + 
size_t vl) { + return __riscv_vrol_tum(vm, vd, vs2, vs1, vl); +} + +vuint64m1_t test_vrol_vx_u64m1_tum(vbool64_t vm, vuint64m1_t vd, + vuint64m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_tum(vm, vd, vs2, rs1, vl); +} + +vuint64m2_t test_vrol_vv_u64m2_tum(vbool32_t vm, vuint64m2_t vd, + vuint64m2_t vs2, vuint64m2_t vs1, + size_t vl) { + return __riscv_vrol_tum(vm, vd, vs2, vs1, vl); +} + +vuint64m2_t test_vrol_vx_u64m2_tum(vbool32_t vm, vuint64m2_t vd, + vuint64m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_tum(vm, vd, vs2, rs1, vl); +} + +vuint64m4_t test_vrol_vv_u64m4_tum(vbool16_t vm, vuint64m4_t vd, + vuint64m4_t vs2, vuint64m4_t vs1, + size_t vl) { + return __riscv_vrol_tum(vm, vd, vs2, vs1, vl); +} + +vuint64m4_t test_vrol_vx_u64m4_tum(vbool16_t vm, vuint64m4_t vd, + vuint64m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_tum(vm, vd, vs2, rs1, vl); +} + +vuint64m8_t test_vrol_vv_u64m8_tum(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, + vuint64m8_t vs1, size_t vl) { + return __riscv_vrol_tum(vm, vd, vs2, vs1, vl); +} + +vuint64m8_t test_vrol_vx_u64m8_tum(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, + size_t rs1, size_t vl) { + return __riscv_vrol_tum(vm, vd, vs2, rs1, vl); +} + +vuint8mf8_t test_vrol_vv_u8mf8_tumu(vbool64_t vm, vuint8mf8_t vd, + vuint8mf8_t vs2, vuint8mf8_t vs1, + size_t vl) { + return __riscv_vrol_tumu(vm, vd, vs2, vs1, vl); +} + +vuint8mf8_t test_vrol_vx_u8mf8_tumu(vbool64_t vm, vuint8mf8_t vd, + vuint8mf8_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_tumu(vm, vd, vs2, rs1, vl); +} + +vuint8mf4_t test_vrol_vv_u8mf4_tumu(vbool32_t vm, vuint8mf4_t vd, + vuint8mf4_t vs2, vuint8mf4_t vs1, + size_t vl) { + return __riscv_vrol_tumu(vm, vd, vs2, vs1, vl); +} + +vuint8mf4_t test_vrol_vx_u8mf4_tumu(vbool32_t vm, vuint8mf4_t vd, + vuint8mf4_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_tumu(vm, vd, vs2, rs1, vl); +} + +vuint8mf2_t test_vrol_vv_u8mf2_tumu(vbool16_t vm, vuint8mf2_t vd, + vuint8mf2_t vs2, vuint8mf2_t vs1, + size_t vl) { + return __riscv_vrol_tumu(vm, vd, vs2, vs1, vl); +} + +vuint8mf2_t test_vrol_vx_u8mf2_tumu(vbool16_t vm, vuint8mf2_t vd, + vuint8mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_tumu(vm, vd, vs2, rs1, vl); +} + +vuint8m1_t test_vrol_vv_u8m1_tumu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, + vuint8m1_t vs1, size_t vl) { + return __riscv_vrol_tumu(vm, vd, vs2, vs1, vl); +} + +vuint8m1_t test_vrol_vx_u8m1_tumu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, + size_t rs1, size_t vl) { + return __riscv_vrol_tumu(vm, vd, vs2, rs1, vl); +} + +vuint8m2_t test_vrol_vv_u8m2_tumu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, + vuint8m2_t vs1, size_t vl) { + return __riscv_vrol_tumu(vm, vd, vs2, vs1, vl); +} + +vuint8m2_t test_vrol_vx_u8m2_tumu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, + size_t rs1, size_t vl) { + return __riscv_vrol_tumu(vm, vd, vs2, rs1, vl); +} + +vuint8m4_t test_vrol_vv_u8m4_tumu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, + vuint8m4_t vs1, size_t vl) { + return __riscv_vrol_tumu(vm, vd, vs2, vs1, vl); +} + +vuint8m4_t test_vrol_vx_u8m4_tumu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, + size_t rs1, size_t vl) { + return __riscv_vrol_tumu(vm, vd, vs2, rs1, vl); +} + +vuint8m8_t test_vrol_vv_u8m8_tumu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, + vuint8m8_t vs1, size_t vl) { + return __riscv_vrol_tumu(vm, vd, vs2, vs1, vl); +} + +vuint8m8_t test_vrol_vx_u8m8_tumu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, + size_t rs1, size_t vl) { + return __riscv_vrol_tumu(vm, vd, vs2, rs1, vl); +} + 
+vuint16mf4_t test_vrol_vv_u16mf4_tumu(vbool64_t vm, vuint16mf4_t vd, + vuint16mf4_t vs2, vuint16mf4_t vs1, + size_t vl) { + return __riscv_vrol_tumu(vm, vd, vs2, vs1, vl); +} + +vuint16mf4_t test_vrol_vx_u16mf4_tumu(vbool64_t vm, vuint16mf4_t vd, + vuint16mf4_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_tumu(vm, vd, vs2, rs1, vl); +} + +vuint16mf2_t test_vrol_vv_u16mf2_tumu(vbool32_t vm, vuint16mf2_t vd, + vuint16mf2_t vs2, vuint16mf2_t vs1, + size_t vl) { + return __riscv_vrol_tumu(vm, vd, vs2, vs1, vl); +} + +vuint16mf2_t test_vrol_vx_u16mf2_tumu(vbool32_t vm, vuint16mf2_t vd, + vuint16mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_tumu(vm, vd, vs2, rs1, vl); +} + +vuint16m1_t test_vrol_vv_u16m1_tumu(vbool16_t vm, vuint16m1_t vd, + vuint16m1_t vs2, vuint16m1_t vs1, + size_t vl) { + return __riscv_vrol_tumu(vm, vd, vs2, vs1, vl); +} + +vuint16m1_t test_vrol_vx_u16m1_tumu(vbool16_t vm, vuint16m1_t vd, + vuint16m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_tumu(vm, vd, vs2, rs1, vl); +} + +vuint16m2_t test_vrol_vv_u16m2_tumu(vbool8_t vm, vuint16m2_t vd, + vuint16m2_t vs2, vuint16m2_t vs1, + size_t vl) { + return __riscv_vrol_tumu(vm, vd, vs2, vs1, vl); +} + +vuint16m2_t test_vrol_vx_u16m2_tumu(vbool8_t vm, vuint16m2_t vd, + vuint16m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_tumu(vm, vd, vs2, rs1, vl); +} + +vuint16m4_t test_vrol_vv_u16m4_tumu(vbool4_t vm, vuint16m4_t vd, + vuint16m4_t vs2, vuint16m4_t vs1, + size_t vl) { + return __riscv_vrol_tumu(vm, vd, vs2, vs1, vl); +} + +vuint16m4_t test_vrol_vx_u16m4_tumu(vbool4_t vm, vuint16m4_t vd, + vuint16m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_tumu(vm, vd, vs2, rs1, vl); +} + +vuint16m8_t test_vrol_vv_u16m8_tumu(vbool2_t vm, vuint16m8_t vd, + vuint16m8_t vs2, vuint16m8_t vs1, + size_t vl) { + return __riscv_vrol_tumu(vm, vd, vs2, vs1, vl); +} + +vuint16m8_t test_vrol_vx_u16m8_tumu(vbool2_t vm, vuint16m8_t vd, + vuint16m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_tumu(vm, vd, vs2, rs1, vl); +} + +vuint32mf2_t test_vrol_vv_u32mf2_tumu(vbool64_t vm, vuint32mf2_t vd, + vuint32mf2_t vs2, vuint32mf2_t vs1, + size_t vl) { + return __riscv_vrol_tumu(vm, vd, vs2, vs1, vl); +} + +vuint32mf2_t test_vrol_vx_u32mf2_tumu(vbool64_t vm, vuint32mf2_t vd, + vuint32mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_tumu(vm, vd, vs2, rs1, vl); +} + +vuint32m1_t test_vrol_vv_u32m1_tumu(vbool32_t vm, vuint32m1_t vd, + vuint32m1_t vs2, vuint32m1_t vs1, + size_t vl) { + return __riscv_vrol_tumu(vm, vd, vs2, vs1, vl); +} + +vuint32m1_t test_vrol_vx_u32m1_tumu(vbool32_t vm, vuint32m1_t vd, + vuint32m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_tumu(vm, vd, vs2, rs1, vl); +} + +vuint32m2_t test_vrol_vv_u32m2_tumu(vbool16_t vm, vuint32m2_t vd, + vuint32m2_t vs2, vuint32m2_t vs1, + size_t vl) { + return __riscv_vrol_tumu(vm, vd, vs2, vs1, vl); +} + +vuint32m2_t test_vrol_vx_u32m2_tumu(vbool16_t vm, vuint32m2_t vd, + vuint32m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_tumu(vm, vd, vs2, rs1, vl); +} + +vuint32m4_t test_vrol_vv_u32m4_tumu(vbool8_t vm, vuint32m4_t vd, + vuint32m4_t vs2, vuint32m4_t vs1, + size_t vl) { + return __riscv_vrol_tumu(vm, vd, vs2, vs1, vl); +} + +vuint32m4_t test_vrol_vx_u32m4_tumu(vbool8_t vm, vuint32m4_t vd, + vuint32m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_tumu(vm, vd, vs2, rs1, vl); +} + +vuint32m8_t test_vrol_vv_u32m8_tumu(vbool4_t vm, vuint32m8_t vd, + vuint32m8_t vs2, vuint32m8_t vs1, + size_t vl) { + return 
__riscv_vrol_tumu(vm, vd, vs2, vs1, vl); +} + +vuint32m8_t test_vrol_vx_u32m8_tumu(vbool4_t vm, vuint32m8_t vd, + vuint32m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_tumu(vm, vd, vs2, rs1, vl); +} + +vuint64m1_t test_vrol_vv_u64m1_tumu(vbool64_t vm, vuint64m1_t vd, + vuint64m1_t vs2, vuint64m1_t vs1, + size_t vl) { + return __riscv_vrol_tumu(vm, vd, vs2, vs1, vl); +} + +vuint64m1_t test_vrol_vx_u64m1_tumu(vbool64_t vm, vuint64m1_t vd, + vuint64m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_tumu(vm, vd, vs2, rs1, vl); +} + +vuint64m2_t test_vrol_vv_u64m2_tumu(vbool32_t vm, vuint64m2_t vd, + vuint64m2_t vs2, vuint64m2_t vs1, + size_t vl) { + return __riscv_vrol_tumu(vm, vd, vs2, vs1, vl); +} + +vuint64m2_t test_vrol_vx_u64m2_tumu(vbool32_t vm, vuint64m2_t vd, + vuint64m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_tumu(vm, vd, vs2, rs1, vl); +} + +vuint64m4_t test_vrol_vv_u64m4_tumu(vbool16_t vm, vuint64m4_t vd, + vuint64m4_t vs2, vuint64m4_t vs1, + size_t vl) { + return __riscv_vrol_tumu(vm, vd, vs2, vs1, vl); +} + +vuint64m4_t test_vrol_vx_u64m4_tumu(vbool16_t vm, vuint64m4_t vd, + vuint64m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_tumu(vm, vd, vs2, rs1, vl); +} + +vuint64m8_t test_vrol_vv_u64m8_tumu(vbool8_t vm, vuint64m8_t vd, + vuint64m8_t vs2, vuint64m8_t vs1, + size_t vl) { + return __riscv_vrol_tumu(vm, vd, vs2, vs1, vl); +} + +vuint64m8_t test_vrol_vx_u64m8_tumu(vbool8_t vm, vuint64m8_t vd, + vuint64m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_tumu(vm, vd, vs2, rs1, vl); +} + +vuint8mf8_t test_vrol_vv_u8mf8_mu(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, + vuint8mf8_t vs1, size_t vl) { + return __riscv_vrol_mu(vm, vd, vs2, vs1, vl); +} + +vuint8mf8_t test_vrol_vx_u8mf8_mu(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, + size_t rs1, size_t vl) { + return __riscv_vrol_mu(vm, vd, vs2, rs1, vl); +} + +vuint8mf4_t test_vrol_vv_u8mf4_mu(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, + vuint8mf4_t vs1, size_t vl) { + return __riscv_vrol_mu(vm, vd, vs2, vs1, vl); +} + +vuint8mf4_t test_vrol_vx_u8mf4_mu(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, + size_t rs1, size_t vl) { + return __riscv_vrol_mu(vm, vd, vs2, rs1, vl); +} + +vuint8mf2_t test_vrol_vv_u8mf2_mu(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, + vuint8mf2_t vs1, size_t vl) { + return __riscv_vrol_mu(vm, vd, vs2, vs1, vl); +} + +vuint8mf2_t test_vrol_vx_u8mf2_mu(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, + size_t rs1, size_t vl) { + return __riscv_vrol_mu(vm, vd, vs2, rs1, vl); +} + +vuint8m1_t test_vrol_vv_u8m1_mu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, + vuint8m1_t vs1, size_t vl) { + return __riscv_vrol_mu(vm, vd, vs2, vs1, vl); +} + +vuint8m1_t test_vrol_vx_u8m1_mu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, + size_t rs1, size_t vl) { + return __riscv_vrol_mu(vm, vd, vs2, rs1, vl); +} + +vuint8m2_t test_vrol_vv_u8m2_mu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, + vuint8m2_t vs1, size_t vl) { + return __riscv_vrol_mu(vm, vd, vs2, vs1, vl); +} + +vuint8m2_t test_vrol_vx_u8m2_mu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, + size_t rs1, size_t vl) { + return __riscv_vrol_mu(vm, vd, vs2, rs1, vl); +} + +vuint8m4_t test_vrol_vv_u8m4_mu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, + vuint8m4_t vs1, size_t vl) { + return __riscv_vrol_mu(vm, vd, vs2, vs1, vl); +} + +vuint8m4_t test_vrol_vx_u8m4_mu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, + size_t rs1, size_t vl) { + return __riscv_vrol_mu(vm, vd, vs2, rs1, vl); +} + +vuint8m8_t test_vrol_vv_u8m8_mu(vbool1_t vm, 
vuint8m8_t vd, vuint8m8_t vs2, + vuint8m8_t vs1, size_t vl) { + return __riscv_vrol_mu(vm, vd, vs2, vs1, vl); +} + +vuint8m8_t test_vrol_vx_u8m8_mu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, + size_t rs1, size_t vl) { + return __riscv_vrol_mu(vm, vd, vs2, rs1, vl); +} + +vuint16mf4_t test_vrol_vv_u16mf4_mu(vbool64_t vm, vuint16mf4_t vd, + vuint16mf4_t vs2, vuint16mf4_t vs1, + size_t vl) { + return __riscv_vrol_mu(vm, vd, vs2, vs1, vl); +} + +vuint16mf4_t test_vrol_vx_u16mf4_mu(vbool64_t vm, vuint16mf4_t vd, + vuint16mf4_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_mu(vm, vd, vs2, rs1, vl); +} + +vuint16mf2_t test_vrol_vv_u16mf2_mu(vbool32_t vm, vuint16mf2_t vd, + vuint16mf2_t vs2, vuint16mf2_t vs1, + size_t vl) { + return __riscv_vrol_mu(vm, vd, vs2, vs1, vl); +} + +vuint16mf2_t test_vrol_vx_u16mf2_mu(vbool32_t vm, vuint16mf2_t vd, + vuint16mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_mu(vm, vd, vs2, rs1, vl); +} + +vuint16m1_t test_vrol_vv_u16m1_mu(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, + vuint16m1_t vs1, size_t vl) { + return __riscv_vrol_mu(vm, vd, vs2, vs1, vl); +} + +vuint16m1_t test_vrol_vx_u16m1_mu(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, + size_t rs1, size_t vl) { + return __riscv_vrol_mu(vm, vd, vs2, rs1, vl); +} + +vuint16m2_t test_vrol_vv_u16m2_mu(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, + vuint16m2_t vs1, size_t vl) { + return __riscv_vrol_mu(vm, vd, vs2, vs1, vl); +} + +vuint16m2_t test_vrol_vx_u16m2_mu(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, + size_t rs1, size_t vl) { + return __riscv_vrol_mu(vm, vd, vs2, rs1, vl); +} + +vuint16m4_t test_vrol_vv_u16m4_mu(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, + vuint16m4_t vs1, size_t vl) { + return __riscv_vrol_mu(vm, vd, vs2, vs1, vl); +} + +vuint16m4_t test_vrol_vx_u16m4_mu(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, + size_t rs1, size_t vl) { + return __riscv_vrol_mu(vm, vd, vs2, rs1, vl); +} + +vuint16m8_t test_vrol_vv_u16m8_mu(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, + vuint16m8_t vs1, size_t vl) { + return __riscv_vrol_mu(vm, vd, vs2, vs1, vl); +} + +vuint16m8_t test_vrol_vx_u16m8_mu(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, + size_t rs1, size_t vl) { + return __riscv_vrol_mu(vm, vd, vs2, rs1, vl); +} + +vuint32mf2_t test_vrol_vv_u32mf2_mu(vbool64_t vm, vuint32mf2_t vd, + vuint32mf2_t vs2, vuint32mf2_t vs1, + size_t vl) { + return __riscv_vrol_mu(vm, vd, vs2, vs1, vl); +} + +vuint32mf2_t test_vrol_vx_u32mf2_mu(vbool64_t vm, vuint32mf2_t vd, + vuint32mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vrol_mu(vm, vd, vs2, rs1, vl); +} + +vuint32m1_t test_vrol_vv_u32m1_mu(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, + vuint32m1_t vs1, size_t vl) { + return __riscv_vrol_mu(vm, vd, vs2, vs1, vl); +} + +vuint32m1_t test_vrol_vx_u32m1_mu(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, + size_t rs1, size_t vl) { + return __riscv_vrol_mu(vm, vd, vs2, rs1, vl); +} + +vuint32m2_t test_vrol_vv_u32m2_mu(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, + vuint32m2_t vs1, size_t vl) { + return __riscv_vrol_mu(vm, vd, vs2, vs1, vl); +} + +vuint32m2_t test_vrol_vx_u32m2_mu(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, + size_t rs1, size_t vl) { + return __riscv_vrol_mu(vm, vd, vs2, rs1, vl); +} + +vuint32m4_t test_vrol_vv_u32m4_mu(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, + vuint32m4_t vs1, size_t vl) { + return __riscv_vrol_mu(vm, vd, vs2, vs1, vl); +} + +vuint32m4_t test_vrol_vx_u32m4_mu(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, + size_t rs1, size_t vl) { + return 
__riscv_vrol_mu(vm, vd, vs2, rs1, vl); +} + +vuint32m8_t test_vrol_vv_u32m8_mu(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, + vuint32m8_t vs1, size_t vl) { + return __riscv_vrol_mu(vm, vd, vs2, vs1, vl); +} + +vuint32m8_t test_vrol_vx_u32m8_mu(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, + size_t rs1, size_t vl) { + return __riscv_vrol_mu(vm, vd, vs2, rs1, vl); +} + +vuint64m1_t test_vrol_vv_u64m1_mu(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, + vuint64m1_t vs1, size_t vl) { + return __riscv_vrol_mu(vm, vd, vs2, vs1, vl); +} + +vuint64m1_t test_vrol_vx_u64m1_mu(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, + size_t rs1, size_t vl) { + return __riscv_vrol_mu(vm, vd, vs2, rs1, vl); +} + +vuint64m2_t test_vrol_vv_u64m2_mu(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, + vuint64m2_t vs1, size_t vl) { + return __riscv_vrol_mu(vm, vd, vs2, vs1, vl); +} + +vuint64m2_t test_vrol_vx_u64m2_mu(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, + size_t rs1, size_t vl) { + return __riscv_vrol_mu(vm, vd, vs2, rs1, vl); +} + +vuint64m4_t test_vrol_vv_u64m4_mu(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, + vuint64m4_t vs1, size_t vl) { + return __riscv_vrol_mu(vm, vd, vs2, vs1, vl); +} + +vuint64m4_t test_vrol_vx_u64m4_mu(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, + size_t rs1, size_t vl) { + return __riscv_vrol_mu(vm, vd, vs2, rs1, vl); +} + +vuint64m8_t test_vrol_vv_u64m8_mu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, + vuint64m8_t vs1, size_t vl) { + return __riscv_vrol_mu(vm, vd, vs2, vs1, vl); +} + +vuint64m8_t test_vrol_vx_u64m8_mu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, + size_t rs1, size_t vl) { + return __riscv_vrol_mu(vm, vd, vs2, rs1, vl); +} diff --git a/auto-generated/vector-crypto/policy_funcs/overloaded-api-testing/vror.c b/auto-generated/vector-crypto/policy_funcs/overloaded-api-testing/vror.c new file mode 100644 index 000000000..6f97b5a85 --- /dev/null +++ b/auto-generated/vector-crypto/policy_funcs/overloaded-api-testing/vror.c @@ -0,0 +1,915 @@ +#include +#include + +vuint8mf8_t test_vror_vv_u8mf8_tu(vuint8mf8_t vd, vuint8mf8_t vs2, + vuint8mf8_t vs1, size_t vl) { + return __riscv_vror_tu(vd, vs2, vs1, vl); +} + +vuint8mf8_t test_vror_vx_u8mf8_tu(vuint8mf8_t vd, vuint8mf8_t vs2, size_t rs1, + size_t vl) { + return __riscv_vror_tu(vd, vs2, rs1, vl); +} + +vuint8mf4_t test_vror_vv_u8mf4_tu(vuint8mf4_t vd, vuint8mf4_t vs2, + vuint8mf4_t vs1, size_t vl) { + return __riscv_vror_tu(vd, vs2, vs1, vl); +} + +vuint8mf4_t test_vror_vx_u8mf4_tu(vuint8mf4_t vd, vuint8mf4_t vs2, size_t rs1, + size_t vl) { + return __riscv_vror_tu(vd, vs2, rs1, vl); +} + +vuint8mf2_t test_vror_vv_u8mf2_tu(vuint8mf2_t vd, vuint8mf2_t vs2, + vuint8mf2_t vs1, size_t vl) { + return __riscv_vror_tu(vd, vs2, vs1, vl); +} + +vuint8mf2_t test_vror_vx_u8mf2_tu(vuint8mf2_t vd, vuint8mf2_t vs2, size_t rs1, + size_t vl) { + return __riscv_vror_tu(vd, vs2, rs1, vl); +} + +vuint8m1_t test_vror_vv_u8m1_tu(vuint8m1_t vd, vuint8m1_t vs2, vuint8m1_t vs1, + size_t vl) { + return __riscv_vror_tu(vd, vs2, vs1, vl); +} + +vuint8m1_t test_vror_vx_u8m1_tu(vuint8m1_t vd, vuint8m1_t vs2, size_t rs1, + size_t vl) { + return __riscv_vror_tu(vd, vs2, rs1, vl); +} + +vuint8m2_t test_vror_vv_u8m2_tu(vuint8m2_t vd, vuint8m2_t vs2, vuint8m2_t vs1, + size_t vl) { + return __riscv_vror_tu(vd, vs2, vs1, vl); +} + +vuint8m2_t test_vror_vx_u8m2_tu(vuint8m2_t vd, vuint8m2_t vs2, size_t rs1, + size_t vl) { + return __riscv_vror_tu(vd, vs2, rs1, vl); +} + +vuint8m4_t test_vror_vv_u8m4_tu(vuint8m4_t vd, vuint8m4_t vs2, vuint8m4_t vs1, + 
size_t vl) { + return __riscv_vror_tu(vd, vs2, vs1, vl); +} + +vuint8m4_t test_vror_vx_u8m4_tu(vuint8m4_t vd, vuint8m4_t vs2, size_t rs1, + size_t vl) { + return __riscv_vror_tu(vd, vs2, rs1, vl); +} + +vuint8m8_t test_vror_vv_u8m8_tu(vuint8m8_t vd, vuint8m8_t vs2, vuint8m8_t vs1, + size_t vl) { + return __riscv_vror_tu(vd, vs2, vs1, vl); +} + +vuint8m8_t test_vror_vx_u8m8_tu(vuint8m8_t vd, vuint8m8_t vs2, size_t rs1, + size_t vl) { + return __riscv_vror_tu(vd, vs2, rs1, vl); +} + +vuint16mf4_t test_vror_vv_u16mf4_tu(vuint16mf4_t vd, vuint16mf4_t vs2, + vuint16mf4_t vs1, size_t vl) { + return __riscv_vror_tu(vd, vs2, vs1, vl); +} + +vuint16mf4_t test_vror_vx_u16mf4_tu(vuint16mf4_t vd, vuint16mf4_t vs2, + size_t rs1, size_t vl) { + return __riscv_vror_tu(vd, vs2, rs1, vl); +} + +vuint16mf2_t test_vror_vv_u16mf2_tu(vuint16mf2_t vd, vuint16mf2_t vs2, + vuint16mf2_t vs1, size_t vl) { + return __riscv_vror_tu(vd, vs2, vs1, vl); +} + +vuint16mf2_t test_vror_vx_u16mf2_tu(vuint16mf2_t vd, vuint16mf2_t vs2, + size_t rs1, size_t vl) { + return __riscv_vror_tu(vd, vs2, rs1, vl); +} + +vuint16m1_t test_vror_vv_u16m1_tu(vuint16m1_t vd, vuint16m1_t vs2, + vuint16m1_t vs1, size_t vl) { + return __riscv_vror_tu(vd, vs2, vs1, vl); +} + +vuint16m1_t test_vror_vx_u16m1_tu(vuint16m1_t vd, vuint16m1_t vs2, size_t rs1, + size_t vl) { + return __riscv_vror_tu(vd, vs2, rs1, vl); +} + +vuint16m2_t test_vror_vv_u16m2_tu(vuint16m2_t vd, vuint16m2_t vs2, + vuint16m2_t vs1, size_t vl) { + return __riscv_vror_tu(vd, vs2, vs1, vl); +} + +vuint16m2_t test_vror_vx_u16m2_tu(vuint16m2_t vd, vuint16m2_t vs2, size_t rs1, + size_t vl) { + return __riscv_vror_tu(vd, vs2, rs1, vl); +} + +vuint16m4_t test_vror_vv_u16m4_tu(vuint16m4_t vd, vuint16m4_t vs2, + vuint16m4_t vs1, size_t vl) { + return __riscv_vror_tu(vd, vs2, vs1, vl); +} + +vuint16m4_t test_vror_vx_u16m4_tu(vuint16m4_t vd, vuint16m4_t vs2, size_t rs1, + size_t vl) { + return __riscv_vror_tu(vd, vs2, rs1, vl); +} + +vuint16m8_t test_vror_vv_u16m8_tu(vuint16m8_t vd, vuint16m8_t vs2, + vuint16m8_t vs1, size_t vl) { + return __riscv_vror_tu(vd, vs2, vs1, vl); +} + +vuint16m8_t test_vror_vx_u16m8_tu(vuint16m8_t vd, vuint16m8_t vs2, size_t rs1, + size_t vl) { + return __riscv_vror_tu(vd, vs2, rs1, vl); +} + +vuint32mf2_t test_vror_vv_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, + vuint32mf2_t vs1, size_t vl) { + return __riscv_vror_tu(vd, vs2, vs1, vl); +} + +vuint32mf2_t test_vror_vx_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, + size_t rs1, size_t vl) { + return __riscv_vror_tu(vd, vs2, rs1, vl); +} + +vuint32m1_t test_vror_vv_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, + vuint32m1_t vs1, size_t vl) { + return __riscv_vror_tu(vd, vs2, vs1, vl); +} + +vuint32m1_t test_vror_vx_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, size_t rs1, + size_t vl) { + return __riscv_vror_tu(vd, vs2, rs1, vl); +} + +vuint32m2_t test_vror_vv_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, + vuint32m2_t vs1, size_t vl) { + return __riscv_vror_tu(vd, vs2, vs1, vl); +} + +vuint32m2_t test_vror_vx_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, size_t rs1, + size_t vl) { + return __riscv_vror_tu(vd, vs2, rs1, vl); +} + +vuint32m4_t test_vror_vv_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, + vuint32m4_t vs1, size_t vl) { + return __riscv_vror_tu(vd, vs2, vs1, vl); +} + +vuint32m4_t test_vror_vx_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, size_t rs1, + size_t vl) { + return __riscv_vror_tu(vd, vs2, rs1, vl); +} + +vuint32m8_t test_vror_vv_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, + vuint32m8_t vs1, size_t vl) { + return 
__riscv_vror_tu(vd, vs2, vs1, vl); +} + +vuint32m8_t test_vror_vx_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, size_t rs1, + size_t vl) { + return __riscv_vror_tu(vd, vs2, rs1, vl); +} + +vuint64m1_t test_vror_vv_u64m1_tu(vuint64m1_t vd, vuint64m1_t vs2, + vuint64m1_t vs1, size_t vl) { + return __riscv_vror_tu(vd, vs2, vs1, vl); +} + +vuint64m1_t test_vror_vx_u64m1_tu(vuint64m1_t vd, vuint64m1_t vs2, size_t rs1, + size_t vl) { + return __riscv_vror_tu(vd, vs2, rs1, vl); +} + +vuint64m2_t test_vror_vv_u64m2_tu(vuint64m2_t vd, vuint64m2_t vs2, + vuint64m2_t vs1, size_t vl) { + return __riscv_vror_tu(vd, vs2, vs1, vl); +} + +vuint64m2_t test_vror_vx_u64m2_tu(vuint64m2_t vd, vuint64m2_t vs2, size_t rs1, + size_t vl) { + return __riscv_vror_tu(vd, vs2, rs1, vl); +} + +vuint64m4_t test_vror_vv_u64m4_tu(vuint64m4_t vd, vuint64m4_t vs2, + vuint64m4_t vs1, size_t vl) { + return __riscv_vror_tu(vd, vs2, vs1, vl); +} + +vuint64m4_t test_vror_vx_u64m4_tu(vuint64m4_t vd, vuint64m4_t vs2, size_t rs1, + size_t vl) { + return __riscv_vror_tu(vd, vs2, rs1, vl); +} + +vuint64m8_t test_vror_vv_u64m8_tu(vuint64m8_t vd, vuint64m8_t vs2, + vuint64m8_t vs1, size_t vl) { + return __riscv_vror_tu(vd, vs2, vs1, vl); +} + +vuint64m8_t test_vror_vx_u64m8_tu(vuint64m8_t vd, vuint64m8_t vs2, size_t rs1, + size_t vl) { + return __riscv_vror_tu(vd, vs2, rs1, vl); +} + +vuint8mf8_t test_vror_vv_u8mf8_tum(vbool64_t vm, vuint8mf8_t vd, + vuint8mf8_t vs2, vuint8mf8_t vs1, + size_t vl) { + return __riscv_vror_tum(vm, vd, vs2, vs1, vl); +} + +vuint8mf8_t test_vror_vx_u8mf8_tum(vbool64_t vm, vuint8mf8_t vd, + vuint8mf8_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_tum(vm, vd, vs2, rs1, vl); +} + +vuint8mf4_t test_vror_vv_u8mf4_tum(vbool32_t vm, vuint8mf4_t vd, + vuint8mf4_t vs2, vuint8mf4_t vs1, + size_t vl) { + return __riscv_vror_tum(vm, vd, vs2, vs1, vl); +} + +vuint8mf4_t test_vror_vx_u8mf4_tum(vbool32_t vm, vuint8mf4_t vd, + vuint8mf4_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_tum(vm, vd, vs2, rs1, vl); +} + +vuint8mf2_t test_vror_vv_u8mf2_tum(vbool16_t vm, vuint8mf2_t vd, + vuint8mf2_t vs2, vuint8mf2_t vs1, + size_t vl) { + return __riscv_vror_tum(vm, vd, vs2, vs1, vl); +} + +vuint8mf2_t test_vror_vx_u8mf2_tum(vbool16_t vm, vuint8mf2_t vd, + vuint8mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_tum(vm, vd, vs2, rs1, vl); +} + +vuint8m1_t test_vror_vv_u8m1_tum(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, + vuint8m1_t vs1, size_t vl) { + return __riscv_vror_tum(vm, vd, vs2, vs1, vl); +} + +vuint8m1_t test_vror_vx_u8m1_tum(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, + size_t rs1, size_t vl) { + return __riscv_vror_tum(vm, vd, vs2, rs1, vl); +} + +vuint8m2_t test_vror_vv_u8m2_tum(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, + vuint8m2_t vs1, size_t vl) { + return __riscv_vror_tum(vm, vd, vs2, vs1, vl); +} + +vuint8m2_t test_vror_vx_u8m2_tum(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, + size_t rs1, size_t vl) { + return __riscv_vror_tum(vm, vd, vs2, rs1, vl); +} + +vuint8m4_t test_vror_vv_u8m4_tum(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, + vuint8m4_t vs1, size_t vl) { + return __riscv_vror_tum(vm, vd, vs2, vs1, vl); +} + +vuint8m4_t test_vror_vx_u8m4_tum(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, + size_t rs1, size_t vl) { + return __riscv_vror_tum(vm, vd, vs2, rs1, vl); +} + +vuint8m8_t test_vror_vv_u8m8_tum(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, + vuint8m8_t vs1, size_t vl) { + return __riscv_vror_tum(vm, vd, vs2, vs1, vl); +} + +vuint8m8_t test_vror_vx_u8m8_tum(vbool1_t vm, vuint8m8_t vd, 
vuint8m8_t vs2, + size_t rs1, size_t vl) { + return __riscv_vror_tum(vm, vd, vs2, rs1, vl); +} + +vuint16mf4_t test_vror_vv_u16mf4_tum(vbool64_t vm, vuint16mf4_t vd, + vuint16mf4_t vs2, vuint16mf4_t vs1, + size_t vl) { + return __riscv_vror_tum(vm, vd, vs2, vs1, vl); +} + +vuint16mf4_t test_vror_vx_u16mf4_tum(vbool64_t vm, vuint16mf4_t vd, + vuint16mf4_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_tum(vm, vd, vs2, rs1, vl); +} + +vuint16mf2_t test_vror_vv_u16mf2_tum(vbool32_t vm, vuint16mf2_t vd, + vuint16mf2_t vs2, vuint16mf2_t vs1, + size_t vl) { + return __riscv_vror_tum(vm, vd, vs2, vs1, vl); +} + +vuint16mf2_t test_vror_vx_u16mf2_tum(vbool32_t vm, vuint16mf2_t vd, + vuint16mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_tum(vm, vd, vs2, rs1, vl); +} + +vuint16m1_t test_vror_vv_u16m1_tum(vbool16_t vm, vuint16m1_t vd, + vuint16m1_t vs2, vuint16m1_t vs1, + size_t vl) { + return __riscv_vror_tum(vm, vd, vs2, vs1, vl); +} + +vuint16m1_t test_vror_vx_u16m1_tum(vbool16_t vm, vuint16m1_t vd, + vuint16m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_tum(vm, vd, vs2, rs1, vl); +} + +vuint16m2_t test_vror_vv_u16m2_tum(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, + vuint16m2_t vs1, size_t vl) { + return __riscv_vror_tum(vm, vd, vs2, vs1, vl); +} + +vuint16m2_t test_vror_vx_u16m2_tum(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, + size_t rs1, size_t vl) { + return __riscv_vror_tum(vm, vd, vs2, rs1, vl); +} + +vuint16m4_t test_vror_vv_u16m4_tum(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, + vuint16m4_t vs1, size_t vl) { + return __riscv_vror_tum(vm, vd, vs2, vs1, vl); +} + +vuint16m4_t test_vror_vx_u16m4_tum(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, + size_t rs1, size_t vl) { + return __riscv_vror_tum(vm, vd, vs2, rs1, vl); +} + +vuint16m8_t test_vror_vv_u16m8_tum(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, + vuint16m8_t vs1, size_t vl) { + return __riscv_vror_tum(vm, vd, vs2, vs1, vl); +} + +vuint16m8_t test_vror_vx_u16m8_tum(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, + size_t rs1, size_t vl) { + return __riscv_vror_tum(vm, vd, vs2, rs1, vl); +} + +vuint32mf2_t test_vror_vv_u32mf2_tum(vbool64_t vm, vuint32mf2_t vd, + vuint32mf2_t vs2, vuint32mf2_t vs1, + size_t vl) { + return __riscv_vror_tum(vm, vd, vs2, vs1, vl); +} + +vuint32mf2_t test_vror_vx_u32mf2_tum(vbool64_t vm, vuint32mf2_t vd, + vuint32mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_tum(vm, vd, vs2, rs1, vl); +} + +vuint32m1_t test_vror_vv_u32m1_tum(vbool32_t vm, vuint32m1_t vd, + vuint32m1_t vs2, vuint32m1_t vs1, + size_t vl) { + return __riscv_vror_tum(vm, vd, vs2, vs1, vl); +} + +vuint32m1_t test_vror_vx_u32m1_tum(vbool32_t vm, vuint32m1_t vd, + vuint32m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_tum(vm, vd, vs2, rs1, vl); +} + +vuint32m2_t test_vror_vv_u32m2_tum(vbool16_t vm, vuint32m2_t vd, + vuint32m2_t vs2, vuint32m2_t vs1, + size_t vl) { + return __riscv_vror_tum(vm, vd, vs2, vs1, vl); +} + +vuint32m2_t test_vror_vx_u32m2_tum(vbool16_t vm, vuint32m2_t vd, + vuint32m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_tum(vm, vd, vs2, rs1, vl); +} + +vuint32m4_t test_vror_vv_u32m4_tum(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, + vuint32m4_t vs1, size_t vl) { + return __riscv_vror_tum(vm, vd, vs2, vs1, vl); +} + +vuint32m4_t test_vror_vx_u32m4_tum(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, + size_t rs1, size_t vl) { + return __riscv_vror_tum(vm, vd, vs2, rs1, vl); +} + +vuint32m8_t test_vror_vv_u32m8_tum(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, + vuint32m8_t 
vs1, size_t vl) { + return __riscv_vror_tum(vm, vd, vs2, vs1, vl); +} + +vuint32m8_t test_vror_vx_u32m8_tum(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, + size_t rs1, size_t vl) { + return __riscv_vror_tum(vm, vd, vs2, rs1, vl); +} + +vuint64m1_t test_vror_vv_u64m1_tum(vbool64_t vm, vuint64m1_t vd, + vuint64m1_t vs2, vuint64m1_t vs1, + size_t vl) { + return __riscv_vror_tum(vm, vd, vs2, vs1, vl); +} + +vuint64m1_t test_vror_vx_u64m1_tum(vbool64_t vm, vuint64m1_t vd, + vuint64m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_tum(vm, vd, vs2, rs1, vl); +} + +vuint64m2_t test_vror_vv_u64m2_tum(vbool32_t vm, vuint64m2_t vd, + vuint64m2_t vs2, vuint64m2_t vs1, + size_t vl) { + return __riscv_vror_tum(vm, vd, vs2, vs1, vl); +} + +vuint64m2_t test_vror_vx_u64m2_tum(vbool32_t vm, vuint64m2_t vd, + vuint64m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_tum(vm, vd, vs2, rs1, vl); +} + +vuint64m4_t test_vror_vv_u64m4_tum(vbool16_t vm, vuint64m4_t vd, + vuint64m4_t vs2, vuint64m4_t vs1, + size_t vl) { + return __riscv_vror_tum(vm, vd, vs2, vs1, vl); +} + +vuint64m4_t test_vror_vx_u64m4_tum(vbool16_t vm, vuint64m4_t vd, + vuint64m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_tum(vm, vd, vs2, rs1, vl); +} + +vuint64m8_t test_vror_vv_u64m8_tum(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, + vuint64m8_t vs1, size_t vl) { + return __riscv_vror_tum(vm, vd, vs2, vs1, vl); +} + +vuint64m8_t test_vror_vx_u64m8_tum(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, + size_t rs1, size_t vl) { + return __riscv_vror_tum(vm, vd, vs2, rs1, vl); +} + +vuint8mf8_t test_vror_vv_u8mf8_tumu(vbool64_t vm, vuint8mf8_t vd, + vuint8mf8_t vs2, vuint8mf8_t vs1, + size_t vl) { + return __riscv_vror_tumu(vm, vd, vs2, vs1, vl); +} + +vuint8mf8_t test_vror_vx_u8mf8_tumu(vbool64_t vm, vuint8mf8_t vd, + vuint8mf8_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_tumu(vm, vd, vs2, rs1, vl); +} + +vuint8mf4_t test_vror_vv_u8mf4_tumu(vbool32_t vm, vuint8mf4_t vd, + vuint8mf4_t vs2, vuint8mf4_t vs1, + size_t vl) { + return __riscv_vror_tumu(vm, vd, vs2, vs1, vl); +} + +vuint8mf4_t test_vror_vx_u8mf4_tumu(vbool32_t vm, vuint8mf4_t vd, + vuint8mf4_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_tumu(vm, vd, vs2, rs1, vl); +} + +vuint8mf2_t test_vror_vv_u8mf2_tumu(vbool16_t vm, vuint8mf2_t vd, + vuint8mf2_t vs2, vuint8mf2_t vs1, + size_t vl) { + return __riscv_vror_tumu(vm, vd, vs2, vs1, vl); +} + +vuint8mf2_t test_vror_vx_u8mf2_tumu(vbool16_t vm, vuint8mf2_t vd, + vuint8mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_tumu(vm, vd, vs2, rs1, vl); +} + +vuint8m1_t test_vror_vv_u8m1_tumu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, + vuint8m1_t vs1, size_t vl) { + return __riscv_vror_tumu(vm, vd, vs2, vs1, vl); +} + +vuint8m1_t test_vror_vx_u8m1_tumu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, + size_t rs1, size_t vl) { + return __riscv_vror_tumu(vm, vd, vs2, rs1, vl); +} + +vuint8m2_t test_vror_vv_u8m2_tumu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, + vuint8m2_t vs1, size_t vl) { + return __riscv_vror_tumu(vm, vd, vs2, vs1, vl); +} + +vuint8m2_t test_vror_vx_u8m2_tumu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, + size_t rs1, size_t vl) { + return __riscv_vror_tumu(vm, vd, vs2, rs1, vl); +} + +vuint8m4_t test_vror_vv_u8m4_tumu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, + vuint8m4_t vs1, size_t vl) { + return __riscv_vror_tumu(vm, vd, vs2, vs1, vl); +} + +vuint8m4_t test_vror_vx_u8m4_tumu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, + size_t rs1, size_t vl) { + return __riscv_vror_tumu(vm, vd, vs2, 
rs1, vl); +} + +vuint8m8_t test_vror_vv_u8m8_tumu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, + vuint8m8_t vs1, size_t vl) { + return __riscv_vror_tumu(vm, vd, vs2, vs1, vl); +} + +vuint8m8_t test_vror_vx_u8m8_tumu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, + size_t rs1, size_t vl) { + return __riscv_vror_tumu(vm, vd, vs2, rs1, vl); +} + +vuint16mf4_t test_vror_vv_u16mf4_tumu(vbool64_t vm, vuint16mf4_t vd, + vuint16mf4_t vs2, vuint16mf4_t vs1, + size_t vl) { + return __riscv_vror_tumu(vm, vd, vs2, vs1, vl); +} + +vuint16mf4_t test_vror_vx_u16mf4_tumu(vbool64_t vm, vuint16mf4_t vd, + vuint16mf4_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_tumu(vm, vd, vs2, rs1, vl); +} + +vuint16mf2_t test_vror_vv_u16mf2_tumu(vbool32_t vm, vuint16mf2_t vd, + vuint16mf2_t vs2, vuint16mf2_t vs1, + size_t vl) { + return __riscv_vror_tumu(vm, vd, vs2, vs1, vl); +} + +vuint16mf2_t test_vror_vx_u16mf2_tumu(vbool32_t vm, vuint16mf2_t vd, + vuint16mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_tumu(vm, vd, vs2, rs1, vl); +} + +vuint16m1_t test_vror_vv_u16m1_tumu(vbool16_t vm, vuint16m1_t vd, + vuint16m1_t vs2, vuint16m1_t vs1, + size_t vl) { + return __riscv_vror_tumu(vm, vd, vs2, vs1, vl); +} + +vuint16m1_t test_vror_vx_u16m1_tumu(vbool16_t vm, vuint16m1_t vd, + vuint16m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_tumu(vm, vd, vs2, rs1, vl); +} + +vuint16m2_t test_vror_vv_u16m2_tumu(vbool8_t vm, vuint16m2_t vd, + vuint16m2_t vs2, vuint16m2_t vs1, + size_t vl) { + return __riscv_vror_tumu(vm, vd, vs2, vs1, vl); +} + +vuint16m2_t test_vror_vx_u16m2_tumu(vbool8_t vm, vuint16m2_t vd, + vuint16m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_tumu(vm, vd, vs2, rs1, vl); +} + +vuint16m4_t test_vror_vv_u16m4_tumu(vbool4_t vm, vuint16m4_t vd, + vuint16m4_t vs2, vuint16m4_t vs1, + size_t vl) { + return __riscv_vror_tumu(vm, vd, vs2, vs1, vl); +} + +vuint16m4_t test_vror_vx_u16m4_tumu(vbool4_t vm, vuint16m4_t vd, + vuint16m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_tumu(vm, vd, vs2, rs1, vl); +} + +vuint16m8_t test_vror_vv_u16m8_tumu(vbool2_t vm, vuint16m8_t vd, + vuint16m8_t vs2, vuint16m8_t vs1, + size_t vl) { + return __riscv_vror_tumu(vm, vd, vs2, vs1, vl); +} + +vuint16m8_t test_vror_vx_u16m8_tumu(vbool2_t vm, vuint16m8_t vd, + vuint16m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_tumu(vm, vd, vs2, rs1, vl); +} + +vuint32mf2_t test_vror_vv_u32mf2_tumu(vbool64_t vm, vuint32mf2_t vd, + vuint32mf2_t vs2, vuint32mf2_t vs1, + size_t vl) { + return __riscv_vror_tumu(vm, vd, vs2, vs1, vl); +} + +vuint32mf2_t test_vror_vx_u32mf2_tumu(vbool64_t vm, vuint32mf2_t vd, + vuint32mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_tumu(vm, vd, vs2, rs1, vl); +} + +vuint32m1_t test_vror_vv_u32m1_tumu(vbool32_t vm, vuint32m1_t vd, + vuint32m1_t vs2, vuint32m1_t vs1, + size_t vl) { + return __riscv_vror_tumu(vm, vd, vs2, vs1, vl); +} + +vuint32m1_t test_vror_vx_u32m1_tumu(vbool32_t vm, vuint32m1_t vd, + vuint32m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_tumu(vm, vd, vs2, rs1, vl); +} + +vuint32m2_t test_vror_vv_u32m2_tumu(vbool16_t vm, vuint32m2_t vd, + vuint32m2_t vs2, vuint32m2_t vs1, + size_t vl) { + return __riscv_vror_tumu(vm, vd, vs2, vs1, vl); +} + +vuint32m2_t test_vror_vx_u32m2_tumu(vbool16_t vm, vuint32m2_t vd, + vuint32m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_tumu(vm, vd, vs2, rs1, vl); +} + +vuint32m4_t test_vror_vv_u32m4_tumu(vbool8_t vm, vuint32m4_t vd, + vuint32m4_t vs2, vuint32m4_t vs1, + size_t vl) { + return 
__riscv_vror_tumu(vm, vd, vs2, vs1, vl); +} + +vuint32m4_t test_vror_vx_u32m4_tumu(vbool8_t vm, vuint32m4_t vd, + vuint32m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_tumu(vm, vd, vs2, rs1, vl); +} + +vuint32m8_t test_vror_vv_u32m8_tumu(vbool4_t vm, vuint32m8_t vd, + vuint32m8_t vs2, vuint32m8_t vs1, + size_t vl) { + return __riscv_vror_tumu(vm, vd, vs2, vs1, vl); +} + +vuint32m8_t test_vror_vx_u32m8_tumu(vbool4_t vm, vuint32m8_t vd, + vuint32m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_tumu(vm, vd, vs2, rs1, vl); +} + +vuint64m1_t test_vror_vv_u64m1_tumu(vbool64_t vm, vuint64m1_t vd, + vuint64m1_t vs2, vuint64m1_t vs1, + size_t vl) { + return __riscv_vror_tumu(vm, vd, vs2, vs1, vl); +} + +vuint64m1_t test_vror_vx_u64m1_tumu(vbool64_t vm, vuint64m1_t vd, + vuint64m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_tumu(vm, vd, vs2, rs1, vl); +} + +vuint64m2_t test_vror_vv_u64m2_tumu(vbool32_t vm, vuint64m2_t vd, + vuint64m2_t vs2, vuint64m2_t vs1, + size_t vl) { + return __riscv_vror_tumu(vm, vd, vs2, vs1, vl); +} + +vuint64m2_t test_vror_vx_u64m2_tumu(vbool32_t vm, vuint64m2_t vd, + vuint64m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_tumu(vm, vd, vs2, rs1, vl); +} + +vuint64m4_t test_vror_vv_u64m4_tumu(vbool16_t vm, vuint64m4_t vd, + vuint64m4_t vs2, vuint64m4_t vs1, + size_t vl) { + return __riscv_vror_tumu(vm, vd, vs2, vs1, vl); +} + +vuint64m4_t test_vror_vx_u64m4_tumu(vbool16_t vm, vuint64m4_t vd, + vuint64m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_tumu(vm, vd, vs2, rs1, vl); +} + +vuint64m8_t test_vror_vv_u64m8_tumu(vbool8_t vm, vuint64m8_t vd, + vuint64m8_t vs2, vuint64m8_t vs1, + size_t vl) { + return __riscv_vror_tumu(vm, vd, vs2, vs1, vl); +} + +vuint64m8_t test_vror_vx_u64m8_tumu(vbool8_t vm, vuint64m8_t vd, + vuint64m8_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_tumu(vm, vd, vs2, rs1, vl); +} + +vuint8mf8_t test_vror_vv_u8mf8_mu(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, + vuint8mf8_t vs1, size_t vl) { + return __riscv_vror_mu(vm, vd, vs2, vs1, vl); +} + +vuint8mf8_t test_vror_vx_u8mf8_mu(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, + size_t rs1, size_t vl) { + return __riscv_vror_mu(vm, vd, vs2, rs1, vl); +} + +vuint8mf4_t test_vror_vv_u8mf4_mu(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, + vuint8mf4_t vs1, size_t vl) { + return __riscv_vror_mu(vm, vd, vs2, vs1, vl); +} + +vuint8mf4_t test_vror_vx_u8mf4_mu(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, + size_t rs1, size_t vl) { + return __riscv_vror_mu(vm, vd, vs2, rs1, vl); +} + +vuint8mf2_t test_vror_vv_u8mf2_mu(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, + vuint8mf2_t vs1, size_t vl) { + return __riscv_vror_mu(vm, vd, vs2, vs1, vl); +} + +vuint8mf2_t test_vror_vx_u8mf2_mu(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, + size_t rs1, size_t vl) { + return __riscv_vror_mu(vm, vd, vs2, rs1, vl); +} + +vuint8m1_t test_vror_vv_u8m1_mu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, + vuint8m1_t vs1, size_t vl) { + return __riscv_vror_mu(vm, vd, vs2, vs1, vl); +} + +vuint8m1_t test_vror_vx_u8m1_mu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, + size_t rs1, size_t vl) { + return __riscv_vror_mu(vm, vd, vs2, rs1, vl); +} + +vuint8m2_t test_vror_vv_u8m2_mu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, + vuint8m2_t vs1, size_t vl) { + return __riscv_vror_mu(vm, vd, vs2, vs1, vl); +} + +vuint8m2_t test_vror_vx_u8m2_mu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, + size_t rs1, size_t vl) { + return __riscv_vror_mu(vm, vd, vs2, rs1, vl); +} + +vuint8m4_t 
test_vror_vv_u8m4_mu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, + vuint8m4_t vs1, size_t vl) { + return __riscv_vror_mu(vm, vd, vs2, vs1, vl); +} + +vuint8m4_t test_vror_vx_u8m4_mu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, + size_t rs1, size_t vl) { + return __riscv_vror_mu(vm, vd, vs2, rs1, vl); +} + +vuint8m8_t test_vror_vv_u8m8_mu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, + vuint8m8_t vs1, size_t vl) { + return __riscv_vror_mu(vm, vd, vs2, vs1, vl); +} + +vuint8m8_t test_vror_vx_u8m8_mu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, + size_t rs1, size_t vl) { + return __riscv_vror_mu(vm, vd, vs2, rs1, vl); +} + +vuint16mf4_t test_vror_vv_u16mf4_mu(vbool64_t vm, vuint16mf4_t vd, + vuint16mf4_t vs2, vuint16mf4_t vs1, + size_t vl) { + return __riscv_vror_mu(vm, vd, vs2, vs1, vl); +} + +vuint16mf4_t test_vror_vx_u16mf4_mu(vbool64_t vm, vuint16mf4_t vd, + vuint16mf4_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_mu(vm, vd, vs2, rs1, vl); +} + +vuint16mf2_t test_vror_vv_u16mf2_mu(vbool32_t vm, vuint16mf2_t vd, + vuint16mf2_t vs2, vuint16mf2_t vs1, + size_t vl) { + return __riscv_vror_mu(vm, vd, vs2, vs1, vl); +} + +vuint16mf2_t test_vror_vx_u16mf2_mu(vbool32_t vm, vuint16mf2_t vd, + vuint16mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_mu(vm, vd, vs2, rs1, vl); +} + +vuint16m1_t test_vror_vv_u16m1_mu(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, + vuint16m1_t vs1, size_t vl) { + return __riscv_vror_mu(vm, vd, vs2, vs1, vl); +} + +vuint16m1_t test_vror_vx_u16m1_mu(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, + size_t rs1, size_t vl) { + return __riscv_vror_mu(vm, vd, vs2, rs1, vl); +} + +vuint16m2_t test_vror_vv_u16m2_mu(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, + vuint16m2_t vs1, size_t vl) { + return __riscv_vror_mu(vm, vd, vs2, vs1, vl); +} + +vuint16m2_t test_vror_vx_u16m2_mu(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, + size_t rs1, size_t vl) { + return __riscv_vror_mu(vm, vd, vs2, rs1, vl); +} + +vuint16m4_t test_vror_vv_u16m4_mu(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, + vuint16m4_t vs1, size_t vl) { + return __riscv_vror_mu(vm, vd, vs2, vs1, vl); +} + +vuint16m4_t test_vror_vx_u16m4_mu(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, + size_t rs1, size_t vl) { + return __riscv_vror_mu(vm, vd, vs2, rs1, vl); +} + +vuint16m8_t test_vror_vv_u16m8_mu(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, + vuint16m8_t vs1, size_t vl) { + return __riscv_vror_mu(vm, vd, vs2, vs1, vl); +} + +vuint16m8_t test_vror_vx_u16m8_mu(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, + size_t rs1, size_t vl) { + return __riscv_vror_mu(vm, vd, vs2, rs1, vl); +} + +vuint32mf2_t test_vror_vv_u32mf2_mu(vbool64_t vm, vuint32mf2_t vd, + vuint32mf2_t vs2, vuint32mf2_t vs1, + size_t vl) { + return __riscv_vror_mu(vm, vd, vs2, vs1, vl); +} + +vuint32mf2_t test_vror_vx_u32mf2_mu(vbool64_t vm, vuint32mf2_t vd, + vuint32mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vror_mu(vm, vd, vs2, rs1, vl); +} + +vuint32m1_t test_vror_vv_u32m1_mu(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, + vuint32m1_t vs1, size_t vl) { + return __riscv_vror_mu(vm, vd, vs2, vs1, vl); +} + +vuint32m1_t test_vror_vx_u32m1_mu(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, + size_t rs1, size_t vl) { + return __riscv_vror_mu(vm, vd, vs2, rs1, vl); +} + +vuint32m2_t test_vror_vv_u32m2_mu(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, + vuint32m2_t vs1, size_t vl) { + return __riscv_vror_mu(vm, vd, vs2, vs1, vl); +} + +vuint32m2_t test_vror_vx_u32m2_mu(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, + size_t rs1, size_t 
vl) { + return __riscv_vror_mu(vm, vd, vs2, rs1, vl); +} + +vuint32m4_t test_vror_vv_u32m4_mu(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, + vuint32m4_t vs1, size_t vl) { + return __riscv_vror_mu(vm, vd, vs2, vs1, vl); +} + +vuint32m4_t test_vror_vx_u32m4_mu(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, + size_t rs1, size_t vl) { + return __riscv_vror_mu(vm, vd, vs2, rs1, vl); +} + +vuint32m8_t test_vror_vv_u32m8_mu(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, + vuint32m8_t vs1, size_t vl) { + return __riscv_vror_mu(vm, vd, vs2, vs1, vl); +} + +vuint32m8_t test_vror_vx_u32m8_mu(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, + size_t rs1, size_t vl) { + return __riscv_vror_mu(vm, vd, vs2, rs1, vl); +} + +vuint64m1_t test_vror_vv_u64m1_mu(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, + vuint64m1_t vs1, size_t vl) { + return __riscv_vror_mu(vm, vd, vs2, vs1, vl); +} + +vuint64m1_t test_vror_vx_u64m1_mu(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, + size_t rs1, size_t vl) { + return __riscv_vror_mu(vm, vd, vs2, rs1, vl); +} + +vuint64m2_t test_vror_vv_u64m2_mu(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, + vuint64m2_t vs1, size_t vl) { + return __riscv_vror_mu(vm, vd, vs2, vs1, vl); +} + +vuint64m2_t test_vror_vx_u64m2_mu(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, + size_t rs1, size_t vl) { + return __riscv_vror_mu(vm, vd, vs2, rs1, vl); +} + +vuint64m4_t test_vror_vv_u64m4_mu(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, + vuint64m4_t vs1, size_t vl) { + return __riscv_vror_mu(vm, vd, vs2, vs1, vl); +} + +vuint64m4_t test_vror_vx_u64m4_mu(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, + size_t rs1, size_t vl) { + return __riscv_vror_mu(vm, vd, vs2, rs1, vl); +} + +vuint64m8_t test_vror_vv_u64m8_mu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, + vuint64m8_t vs1, size_t vl) { + return __riscv_vror_mu(vm, vd, vs2, vs1, vl); +} + +vuint64m8_t test_vror_vx_u64m8_mu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, + size_t rs1, size_t vl) { + return __riscv_vror_mu(vm, vd, vs2, rs1, vl); +} diff --git a/auto-generated/vector-crypto/policy_funcs/overloaded-api-testing/vsha2ch.c b/auto-generated/vector-crypto/policy_funcs/overloaded-api-testing/vsha2ch.c new file mode 100644 index 000000000..eb2435d9c --- /dev/null +++ b/auto-generated/vector-crypto/policy_funcs/overloaded-api-testing/vsha2ch.c @@ -0,0 +1,47 @@ +#include +#include + +vuint32mf2_t test_vsha2ch_vv_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, + vuint32mf2_t vs1, size_t vl) { + return __riscv_vsha2ch_tu(vd, vs2, vs1, vl); +} + +vuint32m1_t test_vsha2ch_vv_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, + vuint32m1_t vs1, size_t vl) { + return __riscv_vsha2ch_tu(vd, vs2, vs1, vl); +} + +vuint32m2_t test_vsha2ch_vv_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, + vuint32m2_t vs1, size_t vl) { + return __riscv_vsha2ch_tu(vd, vs2, vs1, vl); +} + +vuint32m4_t test_vsha2ch_vv_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, + vuint32m4_t vs1, size_t vl) { + return __riscv_vsha2ch_tu(vd, vs2, vs1, vl); +} + +vuint32m8_t test_vsha2ch_vv_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, + vuint32m8_t vs1, size_t vl) { + return __riscv_vsha2ch_tu(vd, vs2, vs1, vl); +} + +vuint64m1_t test_vsha2ch_vv_u64m1_tu(vuint64m1_t vd, vuint64m1_t vs2, + vuint64m1_t vs1, size_t vl) { + return __riscv_vsha2ch_tu(vd, vs2, vs1, vl); +} + +vuint64m2_t test_vsha2ch_vv_u64m2_tu(vuint64m2_t vd, vuint64m2_t vs2, + vuint64m2_t vs1, size_t vl) { + return __riscv_vsha2ch_tu(vd, vs2, vs1, vl); +} + +vuint64m4_t test_vsha2ch_vv_u64m4_tu(vuint64m4_t vd, vuint64m4_t vs2, + vuint64m4_t vs1, 
size_t vl) { + return __riscv_vsha2ch_tu(vd, vs2, vs1, vl); +} + +vuint64m8_t test_vsha2ch_vv_u64m8_tu(vuint64m8_t vd, vuint64m8_t vs2, + vuint64m8_t vs1, size_t vl) { + return __riscv_vsha2ch_tu(vd, vs2, vs1, vl); +} diff --git a/auto-generated/vector-crypto/policy_funcs/overloaded-api-testing/vsha2cl.c b/auto-generated/vector-crypto/policy_funcs/overloaded-api-testing/vsha2cl.c new file mode 100644 index 000000000..f657a7901 --- /dev/null +++ b/auto-generated/vector-crypto/policy_funcs/overloaded-api-testing/vsha2cl.c @@ -0,0 +1,47 @@ +#include +#include + +vuint32mf2_t test_vsha2cl_vv_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, + vuint32mf2_t vs1, size_t vl) { + return __riscv_vsha2cl_tu(vd, vs2, vs1, vl); +} + +vuint32m1_t test_vsha2cl_vv_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, + vuint32m1_t vs1, size_t vl) { + return __riscv_vsha2cl_tu(vd, vs2, vs1, vl); +} + +vuint32m2_t test_vsha2cl_vv_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, + vuint32m2_t vs1, size_t vl) { + return __riscv_vsha2cl_tu(vd, vs2, vs1, vl); +} + +vuint32m4_t test_vsha2cl_vv_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, + vuint32m4_t vs1, size_t vl) { + return __riscv_vsha2cl_tu(vd, vs2, vs1, vl); +} + +vuint32m8_t test_vsha2cl_vv_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, + vuint32m8_t vs1, size_t vl) { + return __riscv_vsha2cl_tu(vd, vs2, vs1, vl); +} + +vuint64m1_t test_vsha2cl_vv_u64m1_tu(vuint64m1_t vd, vuint64m1_t vs2, + vuint64m1_t vs1, size_t vl) { + return __riscv_vsha2cl_tu(vd, vs2, vs1, vl); +} + +vuint64m2_t test_vsha2cl_vv_u64m2_tu(vuint64m2_t vd, vuint64m2_t vs2, + vuint64m2_t vs1, size_t vl) { + return __riscv_vsha2cl_tu(vd, vs2, vs1, vl); +} + +vuint64m4_t test_vsha2cl_vv_u64m4_tu(vuint64m4_t vd, vuint64m4_t vs2, + vuint64m4_t vs1, size_t vl) { + return __riscv_vsha2cl_tu(vd, vs2, vs1, vl); +} + +vuint64m8_t test_vsha2cl_vv_u64m8_tu(vuint64m8_t vd, vuint64m8_t vs2, + vuint64m8_t vs1, size_t vl) { + return __riscv_vsha2cl_tu(vd, vs2, vs1, vl); +} diff --git a/auto-generated/vector-crypto/policy_funcs/overloaded-api-testing/vsha2ms.c b/auto-generated/vector-crypto/policy_funcs/overloaded-api-testing/vsha2ms.c new file mode 100644 index 000000000..349f16c5b --- /dev/null +++ b/auto-generated/vector-crypto/policy_funcs/overloaded-api-testing/vsha2ms.c @@ -0,0 +1,47 @@ +#include +#include + +vuint32mf2_t test_vsha2ms_vv_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, + vuint32mf2_t vs1, size_t vl) { + return __riscv_vsha2ms_tu(vd, vs2, vs1, vl); +} + +vuint32m1_t test_vsha2ms_vv_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, + vuint32m1_t vs1, size_t vl) { + return __riscv_vsha2ms_tu(vd, vs2, vs1, vl); +} + +vuint32m2_t test_vsha2ms_vv_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, + vuint32m2_t vs1, size_t vl) { + return __riscv_vsha2ms_tu(vd, vs2, vs1, vl); +} + +vuint32m4_t test_vsha2ms_vv_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, + vuint32m4_t vs1, size_t vl) { + return __riscv_vsha2ms_tu(vd, vs2, vs1, vl); +} + +vuint32m8_t test_vsha2ms_vv_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, + vuint32m8_t vs1, size_t vl) { + return __riscv_vsha2ms_tu(vd, vs2, vs1, vl); +} + +vuint64m1_t test_vsha2ms_vv_u64m1_tu(vuint64m1_t vd, vuint64m1_t vs2, + vuint64m1_t vs1, size_t vl) { + return __riscv_vsha2ms_tu(vd, vs2, vs1, vl); +} + +vuint64m2_t test_vsha2ms_vv_u64m2_tu(vuint64m2_t vd, vuint64m2_t vs2, + vuint64m2_t vs1, size_t vl) { + return __riscv_vsha2ms_tu(vd, vs2, vs1, vl); +} + +vuint64m4_t test_vsha2ms_vv_u64m4_tu(vuint64m4_t vd, vuint64m4_t vs2, + vuint64m4_t vs1, size_t vl) { + return __riscv_vsha2ms_tu(vd, vs2, vs1, vl); +} + 
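// Illustrative usage (editorial aside, not part of the generated test files).
// A minimal sketch assuming <riscv_vector.h> declares the overloaded policy
// intrinsics exercised above; the helper name and the operand roles below are
// placeholders for illustration, not the exact Zvknh operand grouping. The
// point shown is the _tu (tail-undisturbed) policy: only the first vl element
// groups of the destination are written, the remaining elements of vd are
// left unchanged.
#include <riscv_vector.h>

static inline vuint32m1_t sha256_quad_round_tu(vuint32m1_t state,
                                               vuint32m1_t msg0,
                                               vuint32m1_t msg1, size_t vl) {
  // Extend the message schedule, then apply the low and high halves of the
  // compression update to the working state, all with tail-undisturbed
  // destinations (operand roles here are illustrative only).
  vuint32m1_t w = __riscv_vsha2ms_tu(msg0, msg1, state, vl);
  state = __riscv_vsha2cl_tu(state, msg0, w, vl);
  state = __riscv_vsha2ch_tu(state, msg0, w, vl);
  return state;
}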
+vuint64m8_t test_vsha2ms_vv_u64m8_tu(vuint64m8_t vd, vuint64m8_t vs2, + vuint64m8_t vs1, size_t vl) { + return __riscv_vsha2ms_tu(vd, vs2, vs1, vl); +} diff --git a/auto-generated/vector-crypto/policy_funcs/overloaded-api-testing/vsm3c.c b/auto-generated/vector-crypto/policy_funcs/overloaded-api-testing/vsm3c.c new file mode 100644 index 000000000..1778de96a --- /dev/null +++ b/auto-generated/vector-crypto/policy_funcs/overloaded-api-testing/vsm3c.c @@ -0,0 +1,23 @@ +#include +#include + +vuint32mf2_t test_vsm3c_vi_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, + size_t vl) { + return __riscv_vsm3c_tu(vd, vs2, 0, vl); +} + +vuint32m1_t test_vsm3c_vi_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vsm3c_tu(vd, vs2, 0, vl); +} + +vuint32m2_t test_vsm3c_vi_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vsm3c_tu(vd, vs2, 0, vl); +} + +vuint32m4_t test_vsm3c_vi_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vsm3c_tu(vd, vs2, 0, vl); +} + +vuint32m8_t test_vsm3c_vi_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { + return __riscv_vsm3c_tu(vd, vs2, 0, vl); +} diff --git a/auto-generated/vector-crypto/policy_funcs/overloaded-api-testing/vsm3me.c b/auto-generated/vector-crypto/policy_funcs/overloaded-api-testing/vsm3me.c new file mode 100644 index 000000000..f4536867d --- /dev/null +++ b/auto-generated/vector-crypto/policy_funcs/overloaded-api-testing/vsm3me.c @@ -0,0 +1,27 @@ +#include +#include + +vuint32mf2_t test_vsm3me_vv_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, + vuint32mf2_t vs1, size_t vl) { + return __riscv_vsm3me_tu(vd, vs2, vs1, vl); +} + +vuint32m1_t test_vsm3me_vv_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, + vuint32m1_t vs1, size_t vl) { + return __riscv_vsm3me_tu(vd, vs2, vs1, vl); +} + +vuint32m2_t test_vsm3me_vv_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, + vuint32m2_t vs1, size_t vl) { + return __riscv_vsm3me_tu(vd, vs2, vs1, vl); +} + +vuint32m4_t test_vsm3me_vv_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, + vuint32m4_t vs1, size_t vl) { + return __riscv_vsm3me_tu(vd, vs2, vs1, vl); +} + +vuint32m8_t test_vsm3me_vv_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, + vuint32m8_t vs1, size_t vl) { + return __riscv_vsm3me_tu(vd, vs2, vs1, vl); +} diff --git a/auto-generated/vector-crypto/policy_funcs/overloaded-api-testing/vsm4k.c b/auto-generated/vector-crypto/policy_funcs/overloaded-api-testing/vsm4k.c new file mode 100644 index 000000000..ac789ac44 --- /dev/null +++ b/auto-generated/vector-crypto/policy_funcs/overloaded-api-testing/vsm4k.c @@ -0,0 +1,23 @@ +#include +#include + +vuint32mf2_t test_vsm4k_vi_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, + size_t vl) { + return __riscv_vsm4k_tu(vd, vs2, 0, vl); +} + +vuint32m1_t test_vsm4k_vi_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vsm4k_tu(vd, vs2, 0, vl); +} + +vuint32m2_t test_vsm4k_vi_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vsm4k_tu(vd, vs2, 0, vl); +} + +vuint32m4_t test_vsm4k_vi_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vsm4k_tu(vd, vs2, 0, vl); +} + +vuint32m8_t test_vsm4k_vi_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { + return __riscv_vsm4k_tu(vd, vs2, 0, vl); +} diff --git a/auto-generated/vector-crypto/policy_funcs/overloaded-api-testing/vsm4r.c b/auto-generated/vector-crypto/policy_funcs/overloaded-api-testing/vsm4r.c new file mode 100644 index 000000000..46cf176d3 --- /dev/null +++ 
b/auto-generated/vector-crypto/policy_funcs/overloaded-api-testing/vsm4r.c @@ -0,0 +1,93 @@ +#include +#include + +vuint32mf2_t test_vsm4r_vv_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, + size_t vl) { + return __riscv_vsm4r_vv_tu(vd, vs2, vl); +} + +vuint32mf2_t test_vsm4r_vs_u32mf2_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, + size_t vl) { + return __riscv_vsm4r_vs_tu(vd, vs2, vl); +} + +vuint32m1_t test_vsm4r_vs_u32mf2_u32m1_tu(vuint32m1_t vd, vuint32mf2_t vs2, + size_t vl) { + return __riscv_vsm4r_vs_tu(vd, vs2, vl); +} + +vuint32m2_t test_vsm4r_vs_u32mf2_u32m2_tu(vuint32m2_t vd, vuint32mf2_t vs2, + size_t vl) { + return __riscv_vsm4r_vs_tu(vd, vs2, vl); +} + +vuint32m4_t test_vsm4r_vs_u32mf2_u32m4_tu(vuint32m4_t vd, vuint32mf2_t vs2, + size_t vl) { + return __riscv_vsm4r_vs_tu(vd, vs2, vl); +} + +vuint32m8_t test_vsm4r_vs_u32mf2_u32m8_tu(vuint32m8_t vd, vuint32mf2_t vs2, + size_t vl) { + return __riscv_vsm4r_vs_tu(vd, vs2, vl); +} + +vuint32m1_t test_vsm4r_vv_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vsm4r_vv_tu(vd, vs2, vl); +} + +vuint32m1_t test_vsm4r_vs_u32m1_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, + size_t vl) { + return __riscv_vsm4r_vs_tu(vd, vs2, vl); +} + +vuint32m2_t test_vsm4r_vs_u32m1_u32m2_tu(vuint32m2_t vd, vuint32m1_t vs2, + size_t vl) { + return __riscv_vsm4r_vs_tu(vd, vs2, vl); +} + +vuint32m4_t test_vsm4r_vs_u32m1_u32m4_tu(vuint32m4_t vd, vuint32m1_t vs2, + size_t vl) { + return __riscv_vsm4r_vs_tu(vd, vs2, vl); +} + +vuint32m8_t test_vsm4r_vs_u32m1_u32m8_tu(vuint32m8_t vd, vuint32m1_t vs2, + size_t vl) { + return __riscv_vsm4r_vs_tu(vd, vs2, vl); +} + +vuint32m2_t test_vsm4r_vv_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vsm4r_vv_tu(vd, vs2, vl); +} + +vuint32m2_t test_vsm4r_vs_u32m2_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, + size_t vl) { + return __riscv_vsm4r_vs_tu(vd, vs2, vl); +} + +vuint32m4_t test_vsm4r_vs_u32m2_u32m4_tu(vuint32m4_t vd, vuint32m2_t vs2, + size_t vl) { + return __riscv_vsm4r_vs_tu(vd, vs2, vl); +} + +vuint32m8_t test_vsm4r_vs_u32m2_u32m8_tu(vuint32m8_t vd, vuint32m2_t vs2, + size_t vl) { + return __riscv_vsm4r_vs_tu(vd, vs2, vl); +} + +vuint32m4_t test_vsm4r_vv_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vsm4r_vv_tu(vd, vs2, vl); +} + +vuint32m4_t test_vsm4r_vs_u32m4_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, + size_t vl) { + return __riscv_vsm4r_vs_tu(vd, vs2, vl); +} + +vuint32m8_t test_vsm4r_vs_u32m4_u32m8_tu(vuint32m8_t vd, vuint32m4_t vs2, + size_t vl) { + return __riscv_vsm4r_vs_tu(vd, vs2, vl); +} + +vuint32m8_t test_vsm4r_vv_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { + return __riscv_vsm4r_vv_tu(vd, vs2, vl); +} diff --git a/auto-generated/vector-crypto/policy_funcs/overloaded-api-testing/vwsll.c b/auto-generated/vector-crypto/policy_funcs/overloaded-api-testing/vwsll.c new file mode 100644 index 000000000..e3736d299 --- /dev/null +++ b/auto-generated/vector-crypto/policy_funcs/overloaded-api-testing/vwsll.c @@ -0,0 +1,639 @@ +#include +#include + +vuint16mf4_t test_vwsll_vv_u16mf4_tu(vuint16mf4_t vd, vuint8mf8_t vs2, + vuint8mf8_t vs1, size_t vl) { + return __riscv_vwsll_tu(vd, vs2, vs1, vl); +} + +vuint16mf4_t test_vwsll_vx_u16mf4_tu(vuint16mf4_t vd, vuint8mf8_t vs2, + size_t rs1, size_t vl) { + return __riscv_vwsll_tu(vd, vs2, rs1, vl); +} + +vuint16mf2_t test_vwsll_vv_u16mf2_tu(vuint16mf2_t vd, vuint8mf4_t vs2, + vuint8mf4_t vs1, size_t vl) { + return __riscv_vwsll_tu(vd, vs2, vs1, vl); +} + +vuint16mf2_t 
test_vwsll_vx_u16mf2_tu(vuint16mf2_t vd, vuint8mf4_t vs2, + size_t rs1, size_t vl) { + return __riscv_vwsll_tu(vd, vs2, rs1, vl); +} + +vuint16m1_t test_vwsll_vv_u16m1_tu(vuint16m1_t vd, vuint8mf2_t vs2, + vuint8mf2_t vs1, size_t vl) { + return __riscv_vwsll_tu(vd, vs2, vs1, vl); +} + +vuint16m1_t test_vwsll_vx_u16m1_tu(vuint16m1_t vd, vuint8mf2_t vs2, size_t rs1, + size_t vl) { + return __riscv_vwsll_tu(vd, vs2, rs1, vl); +} + +vuint16m2_t test_vwsll_vv_u16m2_tu(vuint16m2_t vd, vuint8m1_t vs2, + vuint8m1_t vs1, size_t vl) { + return __riscv_vwsll_tu(vd, vs2, vs1, vl); +} + +vuint16m2_t test_vwsll_vx_u16m2_tu(vuint16m2_t vd, vuint8m1_t vs2, size_t rs1, + size_t vl) { + return __riscv_vwsll_tu(vd, vs2, rs1, vl); +} + +vuint16m4_t test_vwsll_vv_u16m4_tu(vuint16m4_t vd, vuint8m2_t vs2, + vuint8m2_t vs1, size_t vl) { + return __riscv_vwsll_tu(vd, vs2, vs1, vl); +} + +vuint16m4_t test_vwsll_vx_u16m4_tu(vuint16m4_t vd, vuint8m2_t vs2, size_t rs1, + size_t vl) { + return __riscv_vwsll_tu(vd, vs2, rs1, vl); +} + +vuint16m8_t test_vwsll_vv_u16m8_tu(vuint16m8_t vd, vuint8m4_t vs2, + vuint8m4_t vs1, size_t vl) { + return __riscv_vwsll_tu(vd, vs2, vs1, vl); +} + +vuint16m8_t test_vwsll_vx_u16m8_tu(vuint16m8_t vd, vuint8m4_t vs2, size_t rs1, + size_t vl) { + return __riscv_vwsll_tu(vd, vs2, rs1, vl); +} + +vuint32mf2_t test_vwsll_vv_u32mf2_tu(vuint32mf2_t vd, vuint16mf4_t vs2, + vuint16mf4_t vs1, size_t vl) { + return __riscv_vwsll_tu(vd, vs2, vs1, vl); +} + +vuint32mf2_t test_vwsll_vx_u32mf2_tu(vuint32mf2_t vd, vuint16mf4_t vs2, + size_t rs1, size_t vl) { + return __riscv_vwsll_tu(vd, vs2, rs1, vl); +} + +vuint32m1_t test_vwsll_vv_u32m1_tu(vuint32m1_t vd, vuint16mf2_t vs2, + vuint16mf2_t vs1, size_t vl) { + return __riscv_vwsll_tu(vd, vs2, vs1, vl); +} + +vuint32m1_t test_vwsll_vx_u32m1_tu(vuint32m1_t vd, vuint16mf2_t vs2, size_t rs1, + size_t vl) { + return __riscv_vwsll_tu(vd, vs2, rs1, vl); +} + +vuint32m2_t test_vwsll_vv_u32m2_tu(vuint32m2_t vd, vuint16m1_t vs2, + vuint16m1_t vs1, size_t vl) { + return __riscv_vwsll_tu(vd, vs2, vs1, vl); +} + +vuint32m2_t test_vwsll_vx_u32m2_tu(vuint32m2_t vd, vuint16m1_t vs2, size_t rs1, + size_t vl) { + return __riscv_vwsll_tu(vd, vs2, rs1, vl); +} + +vuint32m4_t test_vwsll_vv_u32m4_tu(vuint32m4_t vd, vuint16m2_t vs2, + vuint16m2_t vs1, size_t vl) { + return __riscv_vwsll_tu(vd, vs2, vs1, vl); +} + +vuint32m4_t test_vwsll_vx_u32m4_tu(vuint32m4_t vd, vuint16m2_t vs2, size_t rs1, + size_t vl) { + return __riscv_vwsll_tu(vd, vs2, rs1, vl); +} + +vuint32m8_t test_vwsll_vv_u32m8_tu(vuint32m8_t vd, vuint16m4_t vs2, + vuint16m4_t vs1, size_t vl) { + return __riscv_vwsll_tu(vd, vs2, vs1, vl); +} + +vuint32m8_t test_vwsll_vx_u32m8_tu(vuint32m8_t vd, vuint16m4_t vs2, size_t rs1, + size_t vl) { + return __riscv_vwsll_tu(vd, vs2, rs1, vl); +} + +vuint64m1_t test_vwsll_vv_u64m1_tu(vuint64m1_t vd, vuint32mf2_t vs2, + vuint32mf2_t vs1, size_t vl) { + return __riscv_vwsll_tu(vd, vs2, vs1, vl); +} + +vuint64m1_t test_vwsll_vx_u64m1_tu(vuint64m1_t vd, vuint32mf2_t vs2, size_t rs1, + size_t vl) { + return __riscv_vwsll_tu(vd, vs2, rs1, vl); +} + +vuint64m2_t test_vwsll_vv_u64m2_tu(vuint64m2_t vd, vuint32m1_t vs2, + vuint32m1_t vs1, size_t vl) { + return __riscv_vwsll_tu(vd, vs2, vs1, vl); +} + +vuint64m2_t test_vwsll_vx_u64m2_tu(vuint64m2_t vd, vuint32m1_t vs2, size_t rs1, + size_t vl) { + return __riscv_vwsll_tu(vd, vs2, rs1, vl); +} + +vuint64m4_t test_vwsll_vv_u64m4_tu(vuint64m4_t vd, vuint32m2_t vs2, + vuint32m2_t vs1, size_t vl) { + return __riscv_vwsll_tu(vd, vs2, vs1, vl); 
+} + +vuint64m4_t test_vwsll_vx_u64m4_tu(vuint64m4_t vd, vuint32m2_t vs2, size_t rs1, + size_t vl) { + return __riscv_vwsll_tu(vd, vs2, rs1, vl); +} + +vuint64m8_t test_vwsll_vv_u64m8_tu(vuint64m8_t vd, vuint32m4_t vs2, + vuint32m4_t vs1, size_t vl) { + return __riscv_vwsll_tu(vd, vs2, vs1, vl); +} + +vuint64m8_t test_vwsll_vx_u64m8_tu(vuint64m8_t vd, vuint32m4_t vs2, size_t rs1, + size_t vl) { + return __riscv_vwsll_tu(vd, vs2, rs1, vl); +} + +vuint16mf4_t test_vwsll_vv_u16mf4_tum(vbool64_t vm, vuint16mf4_t vd, + vuint8mf8_t vs2, vuint8mf8_t vs1, + size_t vl) { + return __riscv_vwsll_tum(vm, vd, vs2, vs1, vl); +} + +vuint16mf4_t test_vwsll_vx_u16mf4_tum(vbool64_t vm, vuint16mf4_t vd, + vuint8mf8_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_tum(vm, vd, vs2, rs1, vl); +} + +vuint16mf2_t test_vwsll_vv_u16mf2_tum(vbool32_t vm, vuint16mf2_t vd, + vuint8mf4_t vs2, vuint8mf4_t vs1, + size_t vl) { + return __riscv_vwsll_tum(vm, vd, vs2, vs1, vl); +} + +vuint16mf2_t test_vwsll_vx_u16mf2_tum(vbool32_t vm, vuint16mf2_t vd, + vuint8mf4_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_tum(vm, vd, vs2, rs1, vl); +} + +vuint16m1_t test_vwsll_vv_u16m1_tum(vbool16_t vm, vuint16m1_t vd, + vuint8mf2_t vs2, vuint8mf2_t vs1, + size_t vl) { + return __riscv_vwsll_tum(vm, vd, vs2, vs1, vl); +} + +vuint16m1_t test_vwsll_vx_u16m1_tum(vbool16_t vm, vuint16m1_t vd, + vuint8mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_tum(vm, vd, vs2, rs1, vl); +} + +vuint16m2_t test_vwsll_vv_u16m2_tum(vbool8_t vm, vuint16m2_t vd, vuint8m1_t vs2, + vuint8m1_t vs1, size_t vl) { + return __riscv_vwsll_tum(vm, vd, vs2, vs1, vl); +} + +vuint16m2_t test_vwsll_vx_u16m2_tum(vbool8_t vm, vuint16m2_t vd, vuint8m1_t vs2, + size_t rs1, size_t vl) { + return __riscv_vwsll_tum(vm, vd, vs2, rs1, vl); +} + +vuint16m4_t test_vwsll_vv_u16m4_tum(vbool4_t vm, vuint16m4_t vd, vuint8m2_t vs2, + vuint8m2_t vs1, size_t vl) { + return __riscv_vwsll_tum(vm, vd, vs2, vs1, vl); +} + +vuint16m4_t test_vwsll_vx_u16m4_tum(vbool4_t vm, vuint16m4_t vd, vuint8m2_t vs2, + size_t rs1, size_t vl) { + return __riscv_vwsll_tum(vm, vd, vs2, rs1, vl); +} + +vuint16m8_t test_vwsll_vv_u16m8_tum(vbool2_t vm, vuint16m8_t vd, vuint8m4_t vs2, + vuint8m4_t vs1, size_t vl) { + return __riscv_vwsll_tum(vm, vd, vs2, vs1, vl); +} + +vuint16m8_t test_vwsll_vx_u16m8_tum(vbool2_t vm, vuint16m8_t vd, vuint8m4_t vs2, + size_t rs1, size_t vl) { + return __riscv_vwsll_tum(vm, vd, vs2, rs1, vl); +} + +vuint32mf2_t test_vwsll_vv_u32mf2_tum(vbool64_t vm, vuint32mf2_t vd, + vuint16mf4_t vs2, vuint16mf4_t vs1, + size_t vl) { + return __riscv_vwsll_tum(vm, vd, vs2, vs1, vl); +} + +vuint32mf2_t test_vwsll_vx_u32mf2_tum(vbool64_t vm, vuint32mf2_t vd, + vuint16mf4_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_tum(vm, vd, vs2, rs1, vl); +} + +vuint32m1_t test_vwsll_vv_u32m1_tum(vbool32_t vm, vuint32m1_t vd, + vuint16mf2_t vs2, vuint16mf2_t vs1, + size_t vl) { + return __riscv_vwsll_tum(vm, vd, vs2, vs1, vl); +} + +vuint32m1_t test_vwsll_vx_u32m1_tum(vbool32_t vm, vuint32m1_t vd, + vuint16mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_tum(vm, vd, vs2, rs1, vl); +} + +vuint32m2_t test_vwsll_vv_u32m2_tum(vbool16_t vm, vuint32m2_t vd, + vuint16m1_t vs2, vuint16m1_t vs1, + size_t vl) { + return __riscv_vwsll_tum(vm, vd, vs2, vs1, vl); +} + +vuint32m2_t test_vwsll_vx_u32m2_tum(vbool16_t vm, vuint32m2_t vd, + vuint16m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_tum(vm, vd, vs2, rs1, vl); +} + +vuint32m4_t test_vwsll_vv_u32m4_tum(vbool8_t vm, 
vuint32m4_t vd, + vuint16m2_t vs2, vuint16m2_t vs1, + size_t vl) { + return __riscv_vwsll_tum(vm, vd, vs2, vs1, vl); +} + +vuint32m4_t test_vwsll_vx_u32m4_tum(vbool8_t vm, vuint32m4_t vd, + vuint16m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_tum(vm, vd, vs2, rs1, vl); +} + +vuint32m8_t test_vwsll_vv_u32m8_tum(vbool4_t vm, vuint32m8_t vd, + vuint16m4_t vs2, vuint16m4_t vs1, + size_t vl) { + return __riscv_vwsll_tum(vm, vd, vs2, vs1, vl); +} + +vuint32m8_t test_vwsll_vx_u32m8_tum(vbool4_t vm, vuint32m8_t vd, + vuint16m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_tum(vm, vd, vs2, rs1, vl); +} + +vuint64m1_t test_vwsll_vv_u64m1_tum(vbool64_t vm, vuint64m1_t vd, + vuint32mf2_t vs2, vuint32mf2_t vs1, + size_t vl) { + return __riscv_vwsll_tum(vm, vd, vs2, vs1, vl); +} + +vuint64m1_t test_vwsll_vx_u64m1_tum(vbool64_t vm, vuint64m1_t vd, + vuint32mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_tum(vm, vd, vs2, rs1, vl); +} + +vuint64m2_t test_vwsll_vv_u64m2_tum(vbool32_t vm, vuint64m2_t vd, + vuint32m1_t vs2, vuint32m1_t vs1, + size_t vl) { + return __riscv_vwsll_tum(vm, vd, vs2, vs1, vl); +} + +vuint64m2_t test_vwsll_vx_u64m2_tum(vbool32_t vm, vuint64m2_t vd, + vuint32m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_tum(vm, vd, vs2, rs1, vl); +} + +vuint64m4_t test_vwsll_vv_u64m4_tum(vbool16_t vm, vuint64m4_t vd, + vuint32m2_t vs2, vuint32m2_t vs1, + size_t vl) { + return __riscv_vwsll_tum(vm, vd, vs2, vs1, vl); +} + +vuint64m4_t test_vwsll_vx_u64m4_tum(vbool16_t vm, vuint64m4_t vd, + vuint32m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_tum(vm, vd, vs2, rs1, vl); +} + +vuint64m8_t test_vwsll_vv_u64m8_tum(vbool8_t vm, vuint64m8_t vd, + vuint32m4_t vs2, vuint32m4_t vs1, + size_t vl) { + return __riscv_vwsll_tum(vm, vd, vs2, vs1, vl); +} + +vuint64m8_t test_vwsll_vx_u64m8_tum(vbool8_t vm, vuint64m8_t vd, + vuint32m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_tum(vm, vd, vs2, rs1, vl); +} + +vuint16mf4_t test_vwsll_vv_u16mf4_tumu(vbool64_t vm, vuint16mf4_t vd, + vuint8mf8_t vs2, vuint8mf8_t vs1, + size_t vl) { + return __riscv_vwsll_tumu(vm, vd, vs2, vs1, vl); +} + +vuint16mf4_t test_vwsll_vx_u16mf4_tumu(vbool64_t vm, vuint16mf4_t vd, + vuint8mf8_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_tumu(vm, vd, vs2, rs1, vl); +} + +vuint16mf2_t test_vwsll_vv_u16mf2_tumu(vbool32_t vm, vuint16mf2_t vd, + vuint8mf4_t vs2, vuint8mf4_t vs1, + size_t vl) { + return __riscv_vwsll_tumu(vm, vd, vs2, vs1, vl); +} + +vuint16mf2_t test_vwsll_vx_u16mf2_tumu(vbool32_t vm, vuint16mf2_t vd, + vuint8mf4_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_tumu(vm, vd, vs2, rs1, vl); +} + +vuint16m1_t test_vwsll_vv_u16m1_tumu(vbool16_t vm, vuint16m1_t vd, + vuint8mf2_t vs2, vuint8mf2_t vs1, + size_t vl) { + return __riscv_vwsll_tumu(vm, vd, vs2, vs1, vl); +} + +vuint16m1_t test_vwsll_vx_u16m1_tumu(vbool16_t vm, vuint16m1_t vd, + vuint8mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_tumu(vm, vd, vs2, rs1, vl); +} + +vuint16m2_t test_vwsll_vv_u16m2_tumu(vbool8_t vm, vuint16m2_t vd, + vuint8m1_t vs2, vuint8m1_t vs1, + size_t vl) { + return __riscv_vwsll_tumu(vm, vd, vs2, vs1, vl); +} + +vuint16m2_t test_vwsll_vx_u16m2_tumu(vbool8_t vm, vuint16m2_t vd, + vuint8m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_tumu(vm, vd, vs2, rs1, vl); +} + +vuint16m4_t test_vwsll_vv_u16m4_tumu(vbool4_t vm, vuint16m4_t vd, + vuint8m2_t vs2, vuint8m2_t vs1, + size_t vl) { + return __riscv_vwsll_tumu(vm, vd, vs2, vs1, vl); +} + +vuint16m4_t 
test_vwsll_vx_u16m4_tumu(vbool4_t vm, vuint16m4_t vd, + vuint8m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_tumu(vm, vd, vs2, rs1, vl); +} + +vuint16m8_t test_vwsll_vv_u16m8_tumu(vbool2_t vm, vuint16m8_t vd, + vuint8m4_t vs2, vuint8m4_t vs1, + size_t vl) { + return __riscv_vwsll_tumu(vm, vd, vs2, vs1, vl); +} + +vuint16m8_t test_vwsll_vx_u16m8_tumu(vbool2_t vm, vuint16m8_t vd, + vuint8m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_tumu(vm, vd, vs2, rs1, vl); +} + +vuint32mf2_t test_vwsll_vv_u32mf2_tumu(vbool64_t vm, vuint32mf2_t vd, + vuint16mf4_t vs2, vuint16mf4_t vs1, + size_t vl) { + return __riscv_vwsll_tumu(vm, vd, vs2, vs1, vl); +} + +vuint32mf2_t test_vwsll_vx_u32mf2_tumu(vbool64_t vm, vuint32mf2_t vd, + vuint16mf4_t vs2, size_t rs1, + size_t vl) { + return __riscv_vwsll_tumu(vm, vd, vs2, rs1, vl); +} + +vuint32m1_t test_vwsll_vv_u32m1_tumu(vbool32_t vm, vuint32m1_t vd, + vuint16mf2_t vs2, vuint16mf2_t vs1, + size_t vl) { + return __riscv_vwsll_tumu(vm, vd, vs2, vs1, vl); +} + +vuint32m1_t test_vwsll_vx_u32m1_tumu(vbool32_t vm, vuint32m1_t vd, + vuint16mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_tumu(vm, vd, vs2, rs1, vl); +} + +vuint32m2_t test_vwsll_vv_u32m2_tumu(vbool16_t vm, vuint32m2_t vd, + vuint16m1_t vs2, vuint16m1_t vs1, + size_t vl) { + return __riscv_vwsll_tumu(vm, vd, vs2, vs1, vl); +} + +vuint32m2_t test_vwsll_vx_u32m2_tumu(vbool16_t vm, vuint32m2_t vd, + vuint16m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_tumu(vm, vd, vs2, rs1, vl); +} + +vuint32m4_t test_vwsll_vv_u32m4_tumu(vbool8_t vm, vuint32m4_t vd, + vuint16m2_t vs2, vuint16m2_t vs1, + size_t vl) { + return __riscv_vwsll_tumu(vm, vd, vs2, vs1, vl); +} + +vuint32m4_t test_vwsll_vx_u32m4_tumu(vbool8_t vm, vuint32m4_t vd, + vuint16m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_tumu(vm, vd, vs2, rs1, vl); +} + +vuint32m8_t test_vwsll_vv_u32m8_tumu(vbool4_t vm, vuint32m8_t vd, + vuint16m4_t vs2, vuint16m4_t vs1, + size_t vl) { + return __riscv_vwsll_tumu(vm, vd, vs2, vs1, vl); +} + +vuint32m8_t test_vwsll_vx_u32m8_tumu(vbool4_t vm, vuint32m8_t vd, + vuint16m4_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_tumu(vm, vd, vs2, rs1, vl); +} + +vuint64m1_t test_vwsll_vv_u64m1_tumu(vbool64_t vm, vuint64m1_t vd, + vuint32mf2_t vs2, vuint32mf2_t vs1, + size_t vl) { + return __riscv_vwsll_tumu(vm, vd, vs2, vs1, vl); +} + +vuint64m1_t test_vwsll_vx_u64m1_tumu(vbool64_t vm, vuint64m1_t vd, + vuint32mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_tumu(vm, vd, vs2, rs1, vl); +} + +vuint64m2_t test_vwsll_vv_u64m2_tumu(vbool32_t vm, vuint64m2_t vd, + vuint32m1_t vs2, vuint32m1_t vs1, + size_t vl) { + return __riscv_vwsll_tumu(vm, vd, vs2, vs1, vl); +} + +vuint64m2_t test_vwsll_vx_u64m2_tumu(vbool32_t vm, vuint64m2_t vd, + vuint32m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_tumu(vm, vd, vs2, rs1, vl); +} + +vuint64m4_t test_vwsll_vv_u64m4_tumu(vbool16_t vm, vuint64m4_t vd, + vuint32m2_t vs2, vuint32m2_t vs1, + size_t vl) { + return __riscv_vwsll_tumu(vm, vd, vs2, vs1, vl); +} + +vuint64m4_t test_vwsll_vx_u64m4_tumu(vbool16_t vm, vuint64m4_t vd, + vuint32m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_tumu(vm, vd, vs2, rs1, vl); +} + +vuint64m8_t test_vwsll_vv_u64m8_tumu(vbool8_t vm, vuint64m8_t vd, + vuint32m4_t vs2, vuint32m4_t vs1, + size_t vl) { + return __riscv_vwsll_tumu(vm, vd, vs2, vs1, vl); +} + +vuint64m8_t test_vwsll_vx_u64m8_tumu(vbool8_t vm, vuint64m8_t vd, + vuint32m4_t vs2, size_t rs1, size_t vl) { + return 
__riscv_vwsll_tumu(vm, vd, vs2, rs1, vl); +} + +vuint16mf4_t test_vwsll_vv_u16mf4_mu(vbool64_t vm, vuint16mf4_t vd, + vuint8mf8_t vs2, vuint8mf8_t vs1, + size_t vl) { + return __riscv_vwsll_mu(vm, vd, vs2, vs1, vl); +} + +vuint16mf4_t test_vwsll_vx_u16mf4_mu(vbool64_t vm, vuint16mf4_t vd, + vuint8mf8_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_mu(vm, vd, vs2, rs1, vl); +} + +vuint16mf2_t test_vwsll_vv_u16mf2_mu(vbool32_t vm, vuint16mf2_t vd, + vuint8mf4_t vs2, vuint8mf4_t vs1, + size_t vl) { + return __riscv_vwsll_mu(vm, vd, vs2, vs1, vl); +} + +vuint16mf2_t test_vwsll_vx_u16mf2_mu(vbool32_t vm, vuint16mf2_t vd, + vuint8mf4_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_mu(vm, vd, vs2, rs1, vl); +} + +vuint16m1_t test_vwsll_vv_u16m1_mu(vbool16_t vm, vuint16m1_t vd, + vuint8mf2_t vs2, vuint8mf2_t vs1, + size_t vl) { + return __riscv_vwsll_mu(vm, vd, vs2, vs1, vl); +} + +vuint16m1_t test_vwsll_vx_u16m1_mu(vbool16_t vm, vuint16m1_t vd, + vuint8mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_mu(vm, vd, vs2, rs1, vl); +} + +vuint16m2_t test_vwsll_vv_u16m2_mu(vbool8_t vm, vuint16m2_t vd, vuint8m1_t vs2, + vuint8m1_t vs1, size_t vl) { + return __riscv_vwsll_mu(vm, vd, vs2, vs1, vl); +} + +vuint16m2_t test_vwsll_vx_u16m2_mu(vbool8_t vm, vuint16m2_t vd, vuint8m1_t vs2, + size_t rs1, size_t vl) { + return __riscv_vwsll_mu(vm, vd, vs2, rs1, vl); +} + +vuint16m4_t test_vwsll_vv_u16m4_mu(vbool4_t vm, vuint16m4_t vd, vuint8m2_t vs2, + vuint8m2_t vs1, size_t vl) { + return __riscv_vwsll_mu(vm, vd, vs2, vs1, vl); +} + +vuint16m4_t test_vwsll_vx_u16m4_mu(vbool4_t vm, vuint16m4_t vd, vuint8m2_t vs2, + size_t rs1, size_t vl) { + return __riscv_vwsll_mu(vm, vd, vs2, rs1, vl); +} + +vuint16m8_t test_vwsll_vv_u16m8_mu(vbool2_t vm, vuint16m8_t vd, vuint8m4_t vs2, + vuint8m4_t vs1, size_t vl) { + return __riscv_vwsll_mu(vm, vd, vs2, vs1, vl); +} + +vuint16m8_t test_vwsll_vx_u16m8_mu(vbool2_t vm, vuint16m8_t vd, vuint8m4_t vs2, + size_t rs1, size_t vl) { + return __riscv_vwsll_mu(vm, vd, vs2, rs1, vl); +} + +vuint32mf2_t test_vwsll_vv_u32mf2_mu(vbool64_t vm, vuint32mf2_t vd, + vuint16mf4_t vs2, vuint16mf4_t vs1, + size_t vl) { + return __riscv_vwsll_mu(vm, vd, vs2, vs1, vl); +} + +vuint32mf2_t test_vwsll_vx_u32mf2_mu(vbool64_t vm, vuint32mf2_t vd, + vuint16mf4_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_mu(vm, vd, vs2, rs1, vl); +} + +vuint32m1_t test_vwsll_vv_u32m1_mu(vbool32_t vm, vuint32m1_t vd, + vuint16mf2_t vs2, vuint16mf2_t vs1, + size_t vl) { + return __riscv_vwsll_mu(vm, vd, vs2, vs1, vl); +} + +vuint32m1_t test_vwsll_vx_u32m1_mu(vbool32_t vm, vuint32m1_t vd, + vuint16mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_mu(vm, vd, vs2, rs1, vl); +} + +vuint32m2_t test_vwsll_vv_u32m2_mu(vbool16_t vm, vuint32m2_t vd, + vuint16m1_t vs2, vuint16m1_t vs1, + size_t vl) { + return __riscv_vwsll_mu(vm, vd, vs2, vs1, vl); +} + +vuint32m2_t test_vwsll_vx_u32m2_mu(vbool16_t vm, vuint32m2_t vd, + vuint16m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_mu(vm, vd, vs2, rs1, vl); +} + +vuint32m4_t test_vwsll_vv_u32m4_mu(vbool8_t vm, vuint32m4_t vd, vuint16m2_t vs2, + vuint16m2_t vs1, size_t vl) { + return __riscv_vwsll_mu(vm, vd, vs2, vs1, vl); +} + +vuint32m4_t test_vwsll_vx_u32m4_mu(vbool8_t vm, vuint32m4_t vd, vuint16m2_t vs2, + size_t rs1, size_t vl) { + return __riscv_vwsll_mu(vm, vd, vs2, rs1, vl); +} + +vuint32m8_t test_vwsll_vv_u32m8_mu(vbool4_t vm, vuint32m8_t vd, vuint16m4_t vs2, + vuint16m4_t vs1, size_t vl) { + return __riscv_vwsll_mu(vm, vd, vs2, vs1, 
vl); +} + +vuint32m8_t test_vwsll_vx_u32m8_mu(vbool4_t vm, vuint32m8_t vd, vuint16m4_t vs2, + size_t rs1, size_t vl) { + return __riscv_vwsll_mu(vm, vd, vs2, rs1, vl); +} + +vuint64m1_t test_vwsll_vv_u64m1_mu(vbool64_t vm, vuint64m1_t vd, + vuint32mf2_t vs2, vuint32mf2_t vs1, + size_t vl) { + return __riscv_vwsll_mu(vm, vd, vs2, vs1, vl); +} + +vuint64m1_t test_vwsll_vx_u64m1_mu(vbool64_t vm, vuint64m1_t vd, + vuint32mf2_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_mu(vm, vd, vs2, rs1, vl); +} + +vuint64m2_t test_vwsll_vv_u64m2_mu(vbool32_t vm, vuint64m2_t vd, + vuint32m1_t vs2, vuint32m1_t vs1, + size_t vl) { + return __riscv_vwsll_mu(vm, vd, vs2, vs1, vl); +} + +vuint64m2_t test_vwsll_vx_u64m2_mu(vbool32_t vm, vuint64m2_t vd, + vuint32m1_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_mu(vm, vd, vs2, rs1, vl); +} + +vuint64m4_t test_vwsll_vv_u64m4_mu(vbool16_t vm, vuint64m4_t vd, + vuint32m2_t vs2, vuint32m2_t vs1, + size_t vl) { + return __riscv_vwsll_mu(vm, vd, vs2, vs1, vl); +} + +vuint64m4_t test_vwsll_vx_u64m4_mu(vbool16_t vm, vuint64m4_t vd, + vuint32m2_t vs2, size_t rs1, size_t vl) { + return __riscv_vwsll_mu(vm, vd, vs2, rs1, vl); +} + +vuint64m8_t test_vwsll_vv_u64m8_mu(vbool8_t vm, vuint64m8_t vd, vuint32m4_t vs2, + vuint32m4_t vs1, size_t vl) { + return __riscv_vwsll_mu(vm, vd, vs2, vs1, vl); +} + +vuint64m8_t test_vwsll_vx_u64m8_mu(vbool8_t vm, vuint64m8_t vd, vuint32m4_t vs2, + size_t rs1, size_t vl) { + return __riscv_vwsll_mu(vm, vd, vs2, rs1, vl); +} diff --git a/auto-generated/vector-crypto/policy_funcs/overloaded_intrinsic_funcs.md b/auto-generated/vector-crypto/policy_funcs/overloaded_intrinsic_funcs.md new file mode 100644 index 000000000..f2f92ae9f --- /dev/null +++ b/auto-generated/vector-crypto/policy_funcs/overloaded_intrinsic_funcs.md @@ -0,0 +1,1572 @@ + +=== Zvbb - Vector Bit-manipulation used in Cryptography + +[[policy-variant-overloaded]] +==== Vector Bit-manipulation used in Cryptography - Bitwise And-Not + +[,c] +---- +vuint8mf8_t __riscv_vandn_tu (vuint8mf8_t vd, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl); +vuint8mf8_t __riscv_vandn_tu (vuint8mf8_t vd, vuint8mf8_t vs2, uint8_t rs1, size_t vl); +vuint8mf4_t __riscv_vandn_tu (vuint8mf4_t vd, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl); +vuint8mf4_t __riscv_vandn_tu (vuint8mf4_t vd, vuint8mf4_t vs2, uint8_t rs1, size_t vl); +vuint8mf2_t __riscv_vandn_tu (vuint8mf2_t vd, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl); +vuint8mf2_t __riscv_vandn_tu (vuint8mf2_t vd, vuint8mf2_t vs2, uint8_t rs1, size_t vl); +vuint8m1_t __riscv_vandn_tu (vuint8m1_t vd, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl); +vuint8m1_t __riscv_vandn_tu (vuint8m1_t vd, vuint8m1_t vs2, uint8_t rs1, size_t vl); +vuint8m2_t __riscv_vandn_tu (vuint8m2_t vd, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl); +vuint8m2_t __riscv_vandn_tu (vuint8m2_t vd, vuint8m2_t vs2, uint8_t rs1, size_t vl); +vuint8m4_t __riscv_vandn_tu (vuint8m4_t vd, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl); +vuint8m4_t __riscv_vandn_tu (vuint8m4_t vd, vuint8m4_t vs2, uint8_t rs1, size_t vl); +vuint8m8_t __riscv_vandn_tu (vuint8m8_t vd, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl); +vuint8m8_t __riscv_vandn_tu (vuint8m8_t vd, vuint8m8_t vs2, uint8_t rs1, size_t vl); +vuint16mf4_t __riscv_vandn_tu (vuint16mf4_t vd, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl); +vuint16mf4_t __riscv_vandn_tu (vuint16mf4_t vd, vuint16mf4_t vs2, uint16_t rs1, size_t vl); +vuint16mf2_t __riscv_vandn_tu (vuint16mf2_t vd, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl); +vuint16mf2_t 
__riscv_vandn_tu (vuint16mf2_t vd, vuint16mf2_t vs2, uint16_t rs1, size_t vl); +vuint16m1_t __riscv_vandn_tu (vuint16m1_t vd, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl); +vuint16m1_t __riscv_vandn_tu (vuint16m1_t vd, vuint16m1_t vs2, uint16_t rs1, size_t vl); +vuint16m2_t __riscv_vandn_tu (vuint16m2_t vd, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl); +vuint16m2_t __riscv_vandn_tu (vuint16m2_t vd, vuint16m2_t vs2, uint16_t rs1, size_t vl); +vuint16m4_t __riscv_vandn_tu (vuint16m4_t vd, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl); +vuint16m4_t __riscv_vandn_tu (vuint16m4_t vd, vuint16m4_t vs2, uint16_t rs1, size_t vl); +vuint16m8_t __riscv_vandn_tu (vuint16m8_t vd, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl); +vuint16m8_t __riscv_vandn_tu (vuint16m8_t vd, vuint16m8_t vs2, uint16_t rs1, size_t vl); +vuint32mf2_t __riscv_vandn_tu (vuint32mf2_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl); +vuint32mf2_t __riscv_vandn_tu (vuint32mf2_t vd, vuint32mf2_t vs2, uint32_t rs1, size_t vl); +vuint32m1_t __riscv_vandn_tu (vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl); +vuint32m1_t __riscv_vandn_tu (vuint32m1_t vd, vuint32m1_t vs2, uint32_t rs1, size_t vl); +vuint32m2_t __riscv_vandn_tu (vuint32m2_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl); +vuint32m2_t __riscv_vandn_tu (vuint32m2_t vd, vuint32m2_t vs2, uint32_t rs1, size_t vl); +vuint32m4_t __riscv_vandn_tu (vuint32m4_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl); +vuint32m4_t __riscv_vandn_tu (vuint32m4_t vd, vuint32m4_t vs2, uint32_t rs1, size_t vl); +vuint32m8_t __riscv_vandn_tu (vuint32m8_t vd, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl); +vuint32m8_t __riscv_vandn_tu (vuint32m8_t vd, vuint32m8_t vs2, uint32_t rs1, size_t vl); +vuint64m1_t __riscv_vandn_tu (vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl); +vuint64m1_t __riscv_vandn_tu (vuint64m1_t vd, vuint64m1_t vs2, uint64_t rs1, size_t vl); +vuint64m2_t __riscv_vandn_tu (vuint64m2_t vd, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl); +vuint64m2_t __riscv_vandn_tu (vuint64m2_t vd, vuint64m2_t vs2, uint64_t rs1, size_t vl); +vuint64m4_t __riscv_vandn_tu (vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl); +vuint64m4_t __riscv_vandn_tu (vuint64m4_t vd, vuint64m4_t vs2, uint64_t rs1, size_t vl); +vuint64m8_t __riscv_vandn_tu (vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl); +vuint64m8_t __riscv_vandn_tu (vuint64m8_t vd, vuint64m8_t vs2, uint64_t rs1, size_t vl); +// masked functions +vuint8mf8_t __riscv_vandn_tum (vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl); +vuint8mf8_t __riscv_vandn_tum (vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, uint8_t rs1, size_t vl); +vuint8mf4_t __riscv_vandn_tum (vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl); +vuint8mf4_t __riscv_vandn_tum (vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, uint8_t rs1, size_t vl); +vuint8mf2_t __riscv_vandn_tum (vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl); +vuint8mf2_t __riscv_vandn_tum (vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, uint8_t rs1, size_t vl); +vuint8m1_t __riscv_vandn_tum (vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl); +vuint8m1_t __riscv_vandn_tum (vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, uint8_t rs1, size_t vl); +vuint8m2_t __riscv_vandn_tum (vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl); +vuint8m2_t __riscv_vandn_tum (vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, uint8_t rs1, size_t vl); +vuint8m4_t 
__riscv_vandn_tum (vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl); +vuint8m4_t __riscv_vandn_tum (vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, uint8_t rs1, size_t vl); +vuint8m8_t __riscv_vandn_tum (vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl); +vuint8m8_t __riscv_vandn_tum (vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, uint8_t rs1, size_t vl); +vuint16mf4_t __riscv_vandn_tum (vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl); +vuint16mf4_t __riscv_vandn_tum (vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, uint16_t rs1, size_t vl); +vuint16mf2_t __riscv_vandn_tum (vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl); +vuint16mf2_t __riscv_vandn_tum (vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, uint16_t rs1, size_t vl); +vuint16m1_t __riscv_vandn_tum (vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl); +vuint16m1_t __riscv_vandn_tum (vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, uint16_t rs1, size_t vl); +vuint16m2_t __riscv_vandn_tum (vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl); +vuint16m2_t __riscv_vandn_tum (vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, uint16_t rs1, size_t vl); +vuint16m4_t __riscv_vandn_tum (vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl); +vuint16m4_t __riscv_vandn_tum (vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, uint16_t rs1, size_t vl); +vuint16m8_t __riscv_vandn_tum (vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl); +vuint16m8_t __riscv_vandn_tum (vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, uint16_t rs1, size_t vl); +vuint32mf2_t __riscv_vandn_tum (vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl); +vuint32mf2_t __riscv_vandn_tum (vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, uint32_t rs1, size_t vl); +vuint32m1_t __riscv_vandn_tum (vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl); +vuint32m1_t __riscv_vandn_tum (vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, uint32_t rs1, size_t vl); +vuint32m2_t __riscv_vandn_tum (vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl); +vuint32m2_t __riscv_vandn_tum (vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, uint32_t rs1, size_t vl); +vuint32m4_t __riscv_vandn_tum (vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl); +vuint32m4_t __riscv_vandn_tum (vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, uint32_t rs1, size_t vl); +vuint32m8_t __riscv_vandn_tum (vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl); +vuint32m8_t __riscv_vandn_tum (vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, uint32_t rs1, size_t vl); +vuint64m1_t __riscv_vandn_tum (vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl); +vuint64m1_t __riscv_vandn_tum (vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, uint64_t rs1, size_t vl); +vuint64m2_t __riscv_vandn_tum (vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl); +vuint64m2_t __riscv_vandn_tum (vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, uint64_t rs1, size_t vl); +vuint64m4_t __riscv_vandn_tum (vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl); +vuint64m4_t __riscv_vandn_tum (vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, uint64_t rs1, size_t vl); +vuint64m8_t __riscv_vandn_tum (vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl); +vuint64m8_t __riscv_vandn_tum (vbool8_t vm, vuint64m8_t vd, 
vuint64m8_t vs2, uint64_t rs1, size_t vl); +// masked functions +vuint8mf8_t __riscv_vandn_tumu (vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl); +vuint8mf8_t __riscv_vandn_tumu (vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, uint8_t rs1, size_t vl); +vuint8mf4_t __riscv_vandn_tumu (vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl); +vuint8mf4_t __riscv_vandn_tumu (vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, uint8_t rs1, size_t vl); +vuint8mf2_t __riscv_vandn_tumu (vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl); +vuint8mf2_t __riscv_vandn_tumu (vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, uint8_t rs1, size_t vl); +vuint8m1_t __riscv_vandn_tumu (vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl); +vuint8m1_t __riscv_vandn_tumu (vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, uint8_t rs1, size_t vl); +vuint8m2_t __riscv_vandn_tumu (vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl); +vuint8m2_t __riscv_vandn_tumu (vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, uint8_t rs1, size_t vl); +vuint8m4_t __riscv_vandn_tumu (vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl); +vuint8m4_t __riscv_vandn_tumu (vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, uint8_t rs1, size_t vl); +vuint8m8_t __riscv_vandn_tumu (vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl); +vuint8m8_t __riscv_vandn_tumu (vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, uint8_t rs1, size_t vl); +vuint16mf4_t __riscv_vandn_tumu (vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl); +vuint16mf4_t __riscv_vandn_tumu (vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, uint16_t rs1, size_t vl); +vuint16mf2_t __riscv_vandn_tumu (vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl); +vuint16mf2_t __riscv_vandn_tumu (vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, uint16_t rs1, size_t vl); +vuint16m1_t __riscv_vandn_tumu (vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl); +vuint16m1_t __riscv_vandn_tumu (vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, uint16_t rs1, size_t vl); +vuint16m2_t __riscv_vandn_tumu (vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl); +vuint16m2_t __riscv_vandn_tumu (vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, uint16_t rs1, size_t vl); +vuint16m4_t __riscv_vandn_tumu (vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl); +vuint16m4_t __riscv_vandn_tumu (vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, uint16_t rs1, size_t vl); +vuint16m8_t __riscv_vandn_tumu (vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl); +vuint16m8_t __riscv_vandn_tumu (vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, uint16_t rs1, size_t vl); +vuint32mf2_t __riscv_vandn_tumu (vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl); +vuint32mf2_t __riscv_vandn_tumu (vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, uint32_t rs1, size_t vl); +vuint32m1_t __riscv_vandn_tumu (vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl); +vuint32m1_t __riscv_vandn_tumu (vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, uint32_t rs1, size_t vl); +vuint32m2_t __riscv_vandn_tumu (vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl); +vuint32m2_t __riscv_vandn_tumu (vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, uint32_t rs1, size_t vl); +vuint32m4_t __riscv_vandn_tumu (vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, 
vuint32m4_t vs1, size_t vl); +vuint32m4_t __riscv_vandn_tumu (vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, uint32_t rs1, size_t vl); +vuint32m8_t __riscv_vandn_tumu (vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl); +vuint32m8_t __riscv_vandn_tumu (vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, uint32_t rs1, size_t vl); +vuint64m1_t __riscv_vandn_tumu (vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl); +vuint64m1_t __riscv_vandn_tumu (vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, uint64_t rs1, size_t vl); +vuint64m2_t __riscv_vandn_tumu (vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl); +vuint64m2_t __riscv_vandn_tumu (vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, uint64_t rs1, size_t vl); +vuint64m4_t __riscv_vandn_tumu (vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl); +vuint64m4_t __riscv_vandn_tumu (vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, uint64_t rs1, size_t vl); +vuint64m8_t __riscv_vandn_tumu (vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl); +vuint64m8_t __riscv_vandn_tumu (vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, uint64_t rs1, size_t vl); +// masked functions +vuint8mf8_t __riscv_vandn_mu (vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl); +vuint8mf8_t __riscv_vandn_mu (vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, uint8_t rs1, size_t vl); +vuint8mf4_t __riscv_vandn_mu (vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl); +vuint8mf4_t __riscv_vandn_mu (vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, uint8_t rs1, size_t vl); +vuint8mf2_t __riscv_vandn_mu (vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl); +vuint8mf2_t __riscv_vandn_mu (vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, uint8_t rs1, size_t vl); +vuint8m1_t __riscv_vandn_mu (vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl); +vuint8m1_t __riscv_vandn_mu (vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, uint8_t rs1, size_t vl); +vuint8m2_t __riscv_vandn_mu (vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl); +vuint8m2_t __riscv_vandn_mu (vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, uint8_t rs1, size_t vl); +vuint8m4_t __riscv_vandn_mu (vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl); +vuint8m4_t __riscv_vandn_mu (vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, uint8_t rs1, size_t vl); +vuint8m8_t __riscv_vandn_mu (vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl); +vuint8m8_t __riscv_vandn_mu (vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, uint8_t rs1, size_t vl); +vuint16mf4_t __riscv_vandn_mu (vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl); +vuint16mf4_t __riscv_vandn_mu (vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, uint16_t rs1, size_t vl); +vuint16mf2_t __riscv_vandn_mu (vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl); +vuint16mf2_t __riscv_vandn_mu (vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, uint16_t rs1, size_t vl); +vuint16m1_t __riscv_vandn_mu (vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl); +vuint16m1_t __riscv_vandn_mu (vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, uint16_t rs1, size_t vl); +vuint16m2_t __riscv_vandn_mu (vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl); +vuint16m2_t __riscv_vandn_mu (vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, uint16_t rs1, size_t vl); +vuint16m4_t __riscv_vandn_mu 
(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl); +vuint16m4_t __riscv_vandn_mu (vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, uint16_t rs1, size_t vl); +vuint16m8_t __riscv_vandn_mu (vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl); +vuint16m8_t __riscv_vandn_mu (vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, uint16_t rs1, size_t vl); +vuint32mf2_t __riscv_vandn_mu (vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl); +vuint32mf2_t __riscv_vandn_mu (vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, uint32_t rs1, size_t vl); +vuint32m1_t __riscv_vandn_mu (vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl); +vuint32m1_t __riscv_vandn_mu (vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, uint32_t rs1, size_t vl); +vuint32m2_t __riscv_vandn_mu (vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl); +vuint32m2_t __riscv_vandn_mu (vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, uint32_t rs1, size_t vl); +vuint32m4_t __riscv_vandn_mu (vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl); +vuint32m4_t __riscv_vandn_mu (vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, uint32_t rs1, size_t vl); +vuint32m8_t __riscv_vandn_mu (vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl); +vuint32m8_t __riscv_vandn_mu (vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, uint32_t rs1, size_t vl); +vuint64m1_t __riscv_vandn_mu (vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl); +vuint64m1_t __riscv_vandn_mu (vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, uint64_t rs1, size_t vl); +vuint64m2_t __riscv_vandn_mu (vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl); +vuint64m2_t __riscv_vandn_mu (vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, uint64_t rs1, size_t vl); +vuint64m4_t __riscv_vandn_mu (vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl); +vuint64m4_t __riscv_vandn_mu (vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, uint64_t rs1, size_t vl); +vuint64m8_t __riscv_vandn_mu (vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl); +vuint64m8_t __riscv_vandn_mu (vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, uint64_t rs1, size_t vl); +---- + +[[policy-variant-overloaded]] +==== Vector Basic Bit-manipulation - Reverse + +[,c] +---- +vuint8mf8_t __riscv_vbrev_tu (vuint8mf8_t vd, vuint8mf8_t vs2, size_t vl); +vuint8mf4_t __riscv_vbrev_tu (vuint8mf4_t vd, vuint8mf4_t vs2, size_t vl); +vuint8mf2_t __riscv_vbrev_tu (vuint8mf2_t vd, vuint8mf2_t vs2, size_t vl); +vuint8m1_t __riscv_vbrev_tu (vuint8m1_t vd, vuint8m1_t vs2, size_t vl); +vuint8m2_t __riscv_vbrev_tu (vuint8m2_t vd, vuint8m2_t vs2, size_t vl); +vuint8m4_t __riscv_vbrev_tu (vuint8m4_t vd, vuint8m4_t vs2, size_t vl); +vuint8m8_t __riscv_vbrev_tu (vuint8m8_t vd, vuint8m8_t vs2, size_t vl); +vuint16mf4_t __riscv_vbrev_tu (vuint16mf4_t vd, vuint16mf4_t vs2, size_t vl); +vuint16mf2_t __riscv_vbrev_tu (vuint16mf2_t vd, vuint16mf2_t vs2, size_t vl); +vuint16m1_t __riscv_vbrev_tu (vuint16m1_t vd, vuint16m1_t vs2, size_t vl); +vuint16m2_t __riscv_vbrev_tu (vuint16m2_t vd, vuint16m2_t vs2, size_t vl); +vuint16m4_t __riscv_vbrev_tu (vuint16m4_t vd, vuint16m4_t vs2, size_t vl); +vuint16m8_t __riscv_vbrev_tu (vuint16m8_t vd, vuint16m8_t vs2, size_t vl); +vuint32mf2_t __riscv_vbrev_tu (vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vbrev_tu (vuint32m1_t vd, vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vbrev_tu 
(vuint32m2_t vd, vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vbrev_tu (vuint32m4_t vd, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vbrev_tu (vuint32m8_t vd, vuint32m8_t vs2, size_t vl); +vuint64m1_t __riscv_vbrev_tu (vuint64m1_t vd, vuint64m1_t vs2, size_t vl); +vuint64m2_t __riscv_vbrev_tu (vuint64m2_t vd, vuint64m2_t vs2, size_t vl); +vuint64m4_t __riscv_vbrev_tu (vuint64m4_t vd, vuint64m4_t vs2, size_t vl); +vuint64m8_t __riscv_vbrev_tu (vuint64m8_t vd, vuint64m8_t vs2, size_t vl); +vuint8mf8_t __riscv_vbrev8_tu (vuint8mf8_t vd, vuint8mf8_t vs2, size_t vl); +vuint8mf4_t __riscv_vbrev8_tu (vuint8mf4_t vd, vuint8mf4_t vs2, size_t vl); +vuint8mf2_t __riscv_vbrev8_tu (vuint8mf2_t vd, vuint8mf2_t vs2, size_t vl); +vuint8m1_t __riscv_vbrev8_tu (vuint8m1_t vd, vuint8m1_t vs2, size_t vl); +vuint8m2_t __riscv_vbrev8_tu (vuint8m2_t vd, vuint8m2_t vs2, size_t vl); +vuint8m4_t __riscv_vbrev8_tu (vuint8m4_t vd, vuint8m4_t vs2, size_t vl); +vuint8m8_t __riscv_vbrev8_tu (vuint8m8_t vd, vuint8m8_t vs2, size_t vl); +vuint16mf4_t __riscv_vbrev8_tu (vuint16mf4_t vd, vuint16mf4_t vs2, size_t vl); +vuint16mf2_t __riscv_vbrev8_tu (vuint16mf2_t vd, vuint16mf2_t vs2, size_t vl); +vuint16m1_t __riscv_vbrev8_tu (vuint16m1_t vd, vuint16m1_t vs2, size_t vl); +vuint16m2_t __riscv_vbrev8_tu (vuint16m2_t vd, vuint16m2_t vs2, size_t vl); +vuint16m4_t __riscv_vbrev8_tu (vuint16m4_t vd, vuint16m4_t vs2, size_t vl); +vuint16m8_t __riscv_vbrev8_tu (vuint16m8_t vd, vuint16m8_t vs2, size_t vl); +vuint32mf2_t __riscv_vbrev8_tu (vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vbrev8_tu (vuint32m1_t vd, vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vbrev8_tu (vuint32m2_t vd, vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vbrev8_tu (vuint32m4_t vd, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vbrev8_tu (vuint32m8_t vd, vuint32m8_t vs2, size_t vl); +vuint64m1_t __riscv_vbrev8_tu (vuint64m1_t vd, vuint64m1_t vs2, size_t vl); +vuint64m2_t __riscv_vbrev8_tu (vuint64m2_t vd, vuint64m2_t vs2, size_t vl); +vuint64m4_t __riscv_vbrev8_tu (vuint64m4_t vd, vuint64m4_t vs2, size_t vl); +vuint64m8_t __riscv_vbrev8_tu (vuint64m8_t vd, vuint64m8_t vs2, size_t vl); +vuint8mf8_t __riscv_vrev8_tu (vuint8mf8_t vd, vuint8mf8_t vs2, size_t vl); +vuint8mf4_t __riscv_vrev8_tu (vuint8mf4_t vd, vuint8mf4_t vs2, size_t vl); +vuint8mf2_t __riscv_vrev8_tu (vuint8mf2_t vd, vuint8mf2_t vs2, size_t vl); +vuint8m1_t __riscv_vrev8_tu (vuint8m1_t vd, vuint8m1_t vs2, size_t vl); +vuint8m2_t __riscv_vrev8_tu (vuint8m2_t vd, vuint8m2_t vs2, size_t vl); +vuint8m4_t __riscv_vrev8_tu (vuint8m4_t vd, vuint8m4_t vs2, size_t vl); +vuint8m8_t __riscv_vrev8_tu (vuint8m8_t vd, vuint8m8_t vs2, size_t vl); +vuint16mf4_t __riscv_vrev8_tu (vuint16mf4_t vd, vuint16mf4_t vs2, size_t vl); +vuint16mf2_t __riscv_vrev8_tu (vuint16mf2_t vd, vuint16mf2_t vs2, size_t vl); +vuint16m1_t __riscv_vrev8_tu (vuint16m1_t vd, vuint16m1_t vs2, size_t vl); +vuint16m2_t __riscv_vrev8_tu (vuint16m2_t vd, vuint16m2_t vs2, size_t vl); +vuint16m4_t __riscv_vrev8_tu (vuint16m4_t vd, vuint16m4_t vs2, size_t vl); +vuint16m8_t __riscv_vrev8_tu (vuint16m8_t vd, vuint16m8_t vs2, size_t vl); +vuint32mf2_t __riscv_vrev8_tu (vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vrev8_tu (vuint32m1_t vd, vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vrev8_tu (vuint32m2_t vd, vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vrev8_tu (vuint32m4_t vd, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vrev8_tu (vuint32m8_t vd, vuint32m8_t vs2, size_t 
vl); +vuint64m1_t __riscv_vrev8_tu (vuint64m1_t vd, vuint64m1_t vs2, size_t vl); +vuint64m2_t __riscv_vrev8_tu (vuint64m2_t vd, vuint64m2_t vs2, size_t vl); +vuint64m4_t __riscv_vrev8_tu (vuint64m4_t vd, vuint64m4_t vs2, size_t vl); +vuint64m8_t __riscv_vrev8_tu (vuint64m8_t vd, vuint64m8_t vs2, size_t vl); +// masked functions +vuint8mf8_t __riscv_vbrev_tum (vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, size_t vl); +vuint8mf4_t __riscv_vbrev_tum (vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, size_t vl); +vuint8mf2_t __riscv_vbrev_tum (vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, size_t vl); +vuint8m1_t __riscv_vbrev_tum (vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, size_t vl); +vuint8m2_t __riscv_vbrev_tum (vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, size_t vl); +vuint8m4_t __riscv_vbrev_tum (vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, size_t vl); +vuint8m8_t __riscv_vbrev_tum (vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, size_t vl); +vuint16mf4_t __riscv_vbrev_tum (vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, size_t vl); +vuint16mf2_t __riscv_vbrev_tum (vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, size_t vl); +vuint16m1_t __riscv_vbrev_tum (vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, size_t vl); +vuint16m2_t __riscv_vbrev_tum (vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, size_t vl); +vuint16m4_t __riscv_vbrev_tum (vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, size_t vl); +vuint16m8_t __riscv_vbrev_tum (vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, size_t vl); +vuint32mf2_t __riscv_vbrev_tum (vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vbrev_tum (vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vbrev_tum (vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vbrev_tum (vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vbrev_tum (vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, size_t vl); +vuint64m1_t __riscv_vbrev_tum (vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, size_t vl); +vuint64m2_t __riscv_vbrev_tum (vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, size_t vl); +vuint64m4_t __riscv_vbrev_tum (vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, size_t vl); +vuint64m8_t __riscv_vbrev_tum (vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, size_t vl); +vuint8mf8_t __riscv_vbrev8_tum (vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, size_t vl); +vuint8mf4_t __riscv_vbrev8_tum (vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, size_t vl); +vuint8mf2_t __riscv_vbrev8_tum (vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, size_t vl); +vuint8m1_t __riscv_vbrev8_tum (vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, size_t vl); +vuint8m2_t __riscv_vbrev8_tum (vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, size_t vl); +vuint8m4_t __riscv_vbrev8_tum (vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, size_t vl); +vuint8m8_t __riscv_vbrev8_tum (vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, size_t vl); +vuint16mf4_t __riscv_vbrev8_tum (vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, size_t vl); +vuint16mf2_t __riscv_vbrev8_tum (vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, size_t vl); +vuint16m1_t __riscv_vbrev8_tum (vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, size_t vl); +vuint16m2_t __riscv_vbrev8_tum (vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, size_t vl); +vuint16m4_t __riscv_vbrev8_tum (vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, size_t vl); +vuint16m8_t __riscv_vbrev8_tum (vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, size_t vl); +vuint32mf2_t __riscv_vbrev8_tum (vbool64_t vm, 
vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vbrev8_tum (vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vbrev8_tum (vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vbrev8_tum (vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vbrev8_tum (vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, size_t vl); +vuint64m1_t __riscv_vbrev8_tum (vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, size_t vl); +vuint64m2_t __riscv_vbrev8_tum (vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, size_t vl); +vuint64m4_t __riscv_vbrev8_tum (vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, size_t vl); +vuint64m8_t __riscv_vbrev8_tum (vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, size_t vl); +vuint8mf8_t __riscv_vrev8_tum (vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, size_t vl); +vuint8mf4_t __riscv_vrev8_tum (vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, size_t vl); +vuint8mf2_t __riscv_vrev8_tum (vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, size_t vl); +vuint8m1_t __riscv_vrev8_tum (vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, size_t vl); +vuint8m2_t __riscv_vrev8_tum (vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, size_t vl); +vuint8m4_t __riscv_vrev8_tum (vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, size_t vl); +vuint8m8_t __riscv_vrev8_tum (vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, size_t vl); +vuint16mf4_t __riscv_vrev8_tum (vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, size_t vl); +vuint16mf2_t __riscv_vrev8_tum (vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, size_t vl); +vuint16m1_t __riscv_vrev8_tum (vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, size_t vl); +vuint16m2_t __riscv_vrev8_tum (vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, size_t vl); +vuint16m4_t __riscv_vrev8_tum (vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, size_t vl); +vuint16m8_t __riscv_vrev8_tum (vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, size_t vl); +vuint32mf2_t __riscv_vrev8_tum (vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vrev8_tum (vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vrev8_tum (vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vrev8_tum (vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vrev8_tum (vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, size_t vl); +vuint64m1_t __riscv_vrev8_tum (vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, size_t vl); +vuint64m2_t __riscv_vrev8_tum (vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, size_t vl); +vuint64m4_t __riscv_vrev8_tum (vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, size_t vl); +vuint64m8_t __riscv_vrev8_tum (vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, size_t vl); +// masked functions +vuint8mf8_t __riscv_vbrev_tumu (vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, size_t vl); +vuint8mf4_t __riscv_vbrev_tumu (vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, size_t vl); +vuint8mf2_t __riscv_vbrev_tumu (vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, size_t vl); +vuint8m1_t __riscv_vbrev_tumu (vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, size_t vl); +vuint8m2_t __riscv_vbrev_tumu (vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, size_t vl); +vuint8m4_t __riscv_vbrev_tumu (vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, size_t vl); +vuint8m8_t __riscv_vbrev_tumu (vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, size_t vl); +vuint16mf4_t __riscv_vbrev_tumu (vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, size_t vl); +vuint16mf2_t __riscv_vbrev_tumu 
(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, size_t vl); +vuint16m1_t __riscv_vbrev_tumu (vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, size_t vl); +vuint16m2_t __riscv_vbrev_tumu (vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, size_t vl); +vuint16m4_t __riscv_vbrev_tumu (vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, size_t vl); +vuint16m8_t __riscv_vbrev_tumu (vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, size_t vl); +vuint32mf2_t __riscv_vbrev_tumu (vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vbrev_tumu (vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vbrev_tumu (vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vbrev_tumu (vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vbrev_tumu (vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, size_t vl); +vuint64m1_t __riscv_vbrev_tumu (vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, size_t vl); +vuint64m2_t __riscv_vbrev_tumu (vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, size_t vl); +vuint64m4_t __riscv_vbrev_tumu (vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, size_t vl); +vuint64m8_t __riscv_vbrev_tumu (vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, size_t vl); +vuint8mf8_t __riscv_vbrev8_tumu (vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, size_t vl); +vuint8mf4_t __riscv_vbrev8_tumu (vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, size_t vl); +vuint8mf2_t __riscv_vbrev8_tumu (vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, size_t vl); +vuint8m1_t __riscv_vbrev8_tumu (vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, size_t vl); +vuint8m2_t __riscv_vbrev8_tumu (vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, size_t vl); +vuint8m4_t __riscv_vbrev8_tumu (vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, size_t vl); +vuint8m8_t __riscv_vbrev8_tumu (vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, size_t vl); +vuint16mf4_t __riscv_vbrev8_tumu (vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, size_t vl); +vuint16mf2_t __riscv_vbrev8_tumu (vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, size_t vl); +vuint16m1_t __riscv_vbrev8_tumu (vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, size_t vl); +vuint16m2_t __riscv_vbrev8_tumu (vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, size_t vl); +vuint16m4_t __riscv_vbrev8_tumu (vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, size_t vl); +vuint16m8_t __riscv_vbrev8_tumu (vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, size_t vl); +vuint32mf2_t __riscv_vbrev8_tumu (vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vbrev8_tumu (vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vbrev8_tumu (vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vbrev8_tumu (vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vbrev8_tumu (vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, size_t vl); +vuint64m1_t __riscv_vbrev8_tumu (vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, size_t vl); +vuint64m2_t __riscv_vbrev8_tumu (vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, size_t vl); +vuint64m4_t __riscv_vbrev8_tumu (vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, size_t vl); +vuint64m8_t __riscv_vbrev8_tumu (vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, size_t vl); +vuint8mf8_t __riscv_vrev8_tumu (vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, size_t vl); +vuint8mf4_t __riscv_vrev8_tumu (vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, size_t vl); +vuint8mf2_t __riscv_vrev8_tumu (vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, 
size_t vl); +vuint8m1_t __riscv_vrev8_tumu (vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, size_t vl); +vuint8m2_t __riscv_vrev8_tumu (vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, size_t vl); +vuint8m4_t __riscv_vrev8_tumu (vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, size_t vl); +vuint8m8_t __riscv_vrev8_tumu (vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, size_t vl); +vuint16mf4_t __riscv_vrev8_tumu (vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, size_t vl); +vuint16mf2_t __riscv_vrev8_tumu (vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, size_t vl); +vuint16m1_t __riscv_vrev8_tumu (vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, size_t vl); +vuint16m2_t __riscv_vrev8_tumu (vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, size_t vl); +vuint16m4_t __riscv_vrev8_tumu (vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, size_t vl); +vuint16m8_t __riscv_vrev8_tumu (vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, size_t vl); +vuint32mf2_t __riscv_vrev8_tumu (vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vrev8_tumu (vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vrev8_tumu (vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vrev8_tumu (vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vrev8_tumu (vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, size_t vl); +vuint64m1_t __riscv_vrev8_tumu (vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, size_t vl); +vuint64m2_t __riscv_vrev8_tumu (vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, size_t vl); +vuint64m4_t __riscv_vrev8_tumu (vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, size_t vl); +vuint64m8_t __riscv_vrev8_tumu (vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, size_t vl); +// masked functions +vuint8mf8_t __riscv_vbrev_mu (vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, size_t vl); +vuint8mf4_t __riscv_vbrev_mu (vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, size_t vl); +vuint8mf2_t __riscv_vbrev_mu (vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, size_t vl); +vuint8m1_t __riscv_vbrev_mu (vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, size_t vl); +vuint8m2_t __riscv_vbrev_mu (vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, size_t vl); +vuint8m4_t __riscv_vbrev_mu (vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, size_t vl); +vuint8m8_t __riscv_vbrev_mu (vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, size_t vl); +vuint16mf4_t __riscv_vbrev_mu (vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, size_t vl); +vuint16mf2_t __riscv_vbrev_mu (vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, size_t vl); +vuint16m1_t __riscv_vbrev_mu (vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, size_t vl); +vuint16m2_t __riscv_vbrev_mu (vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, size_t vl); +vuint16m4_t __riscv_vbrev_mu (vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, size_t vl); +vuint16m8_t __riscv_vbrev_mu (vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, size_t vl); +vuint32mf2_t __riscv_vbrev_mu (vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vbrev_mu (vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vbrev_mu (vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vbrev_mu (vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vbrev_mu (vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, size_t vl); +vuint64m1_t __riscv_vbrev_mu (vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, size_t vl); +vuint64m2_t __riscv_vbrev_mu (vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, size_t 
vl); +vuint64m4_t __riscv_vbrev_mu (vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, size_t vl); +vuint64m8_t __riscv_vbrev_mu (vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, size_t vl); +vuint8mf8_t __riscv_vbrev8_mu (vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, size_t vl); +vuint8mf4_t __riscv_vbrev8_mu (vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, size_t vl); +vuint8mf2_t __riscv_vbrev8_mu (vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, size_t vl); +vuint8m1_t __riscv_vbrev8_mu (vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, size_t vl); +vuint8m2_t __riscv_vbrev8_mu (vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, size_t vl); +vuint8m4_t __riscv_vbrev8_mu (vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, size_t vl); +vuint8m8_t __riscv_vbrev8_mu (vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, size_t vl); +vuint16mf4_t __riscv_vbrev8_mu (vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, size_t vl); +vuint16mf2_t __riscv_vbrev8_mu (vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, size_t vl); +vuint16m1_t __riscv_vbrev8_mu (vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, size_t vl); +vuint16m2_t __riscv_vbrev8_mu (vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, size_t vl); +vuint16m4_t __riscv_vbrev8_mu (vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, size_t vl); +vuint16m8_t __riscv_vbrev8_mu (vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, size_t vl); +vuint32mf2_t __riscv_vbrev8_mu (vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vbrev8_mu (vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vbrev8_mu (vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vbrev8_mu (vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vbrev8_mu (vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, size_t vl); +vuint64m1_t __riscv_vbrev8_mu (vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, size_t vl); +vuint64m2_t __riscv_vbrev8_mu (vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, size_t vl); +vuint64m4_t __riscv_vbrev8_mu (vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, size_t vl); +vuint64m8_t __riscv_vbrev8_mu (vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, size_t vl); +vuint8mf8_t __riscv_vrev8_mu (vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, size_t vl); +vuint8mf4_t __riscv_vrev8_mu (vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, size_t vl); +vuint8mf2_t __riscv_vrev8_mu (vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, size_t vl); +vuint8m1_t __riscv_vrev8_mu (vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, size_t vl); +vuint8m2_t __riscv_vrev8_mu (vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, size_t vl); +vuint8m4_t __riscv_vrev8_mu (vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, size_t vl); +vuint8m8_t __riscv_vrev8_mu (vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, size_t vl); +vuint16mf4_t __riscv_vrev8_mu (vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, size_t vl); +vuint16mf2_t __riscv_vrev8_mu (vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, size_t vl); +vuint16m1_t __riscv_vrev8_mu (vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, size_t vl); +vuint16m2_t __riscv_vrev8_mu (vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, size_t vl); +vuint16m4_t __riscv_vrev8_mu (vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, size_t vl); +vuint16m8_t __riscv_vrev8_mu (vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, size_t vl); +vuint32mf2_t __riscv_vrev8_mu (vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vrev8_mu (vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vrev8_mu (vbool16_t 
vm, vuint32m2_t vd, vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vrev8_mu (vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vrev8_mu (vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, size_t vl); +vuint64m1_t __riscv_vrev8_mu (vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, size_t vl); +vuint64m2_t __riscv_vrev8_mu (vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, size_t vl); +vuint64m4_t __riscv_vrev8_mu (vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, size_t vl); +vuint64m8_t __riscv_vrev8_mu (vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, size_t vl); +---- + +[[policy-variant-overloaded]] +==== Vector Basic Bit-manipulation - Count Bits + +[,c] +---- +vuint8mf8_t __riscv_vclz_tu (vuint8mf8_t vd, vuint8mf8_t vs2, size_t vl); +vuint8mf4_t __riscv_vclz_tu (vuint8mf4_t vd, vuint8mf4_t vs2, size_t vl); +vuint8mf2_t __riscv_vclz_tu (vuint8mf2_t vd, vuint8mf2_t vs2, size_t vl); +vuint8m1_t __riscv_vclz_tu (vuint8m1_t vd, vuint8m1_t vs2, size_t vl); +vuint8m2_t __riscv_vclz_tu (vuint8m2_t vd, vuint8m2_t vs2, size_t vl); +vuint8m4_t __riscv_vclz_tu (vuint8m4_t vd, vuint8m4_t vs2, size_t vl); +vuint8m8_t __riscv_vclz_tu (vuint8m8_t vd, vuint8m8_t vs2, size_t vl); +vuint16mf4_t __riscv_vclz_tu (vuint16mf4_t vd, vuint16mf4_t vs2, size_t vl); +vuint16mf2_t __riscv_vclz_tu (vuint16mf2_t vd, vuint16mf2_t vs2, size_t vl); +vuint16m1_t __riscv_vclz_tu (vuint16m1_t vd, vuint16m1_t vs2, size_t vl); +vuint16m2_t __riscv_vclz_tu (vuint16m2_t vd, vuint16m2_t vs2, size_t vl); +vuint16m4_t __riscv_vclz_tu (vuint16m4_t vd, vuint16m4_t vs2, size_t vl); +vuint16m8_t __riscv_vclz_tu (vuint16m8_t vd, vuint16m8_t vs2, size_t vl); +vuint32mf2_t __riscv_vclz_tu (vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vclz_tu (vuint32m1_t vd, vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vclz_tu (vuint32m2_t vd, vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vclz_tu (vuint32m4_t vd, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vclz_tu (vuint32m8_t vd, vuint32m8_t vs2, size_t vl); +vuint64m1_t __riscv_vclz_tu (vuint64m1_t vd, vuint64m1_t vs2, size_t vl); +vuint64m2_t __riscv_vclz_tu (vuint64m2_t vd, vuint64m2_t vs2, size_t vl); +vuint64m4_t __riscv_vclz_tu (vuint64m4_t vd, vuint64m4_t vs2, size_t vl); +vuint64m8_t __riscv_vclz_tu (vuint64m8_t vd, vuint64m8_t vs2, size_t vl); +vuint8mf8_t __riscv_vctz_tu (vuint8mf8_t vd, vuint8mf8_t vs2, size_t vl); +vuint8mf4_t __riscv_vctz_tu (vuint8mf4_t vd, vuint8mf4_t vs2, size_t vl); +vuint8mf2_t __riscv_vctz_tu (vuint8mf2_t vd, vuint8mf2_t vs2, size_t vl); +vuint8m1_t __riscv_vctz_tu (vuint8m1_t vd, vuint8m1_t vs2, size_t vl); +vuint8m2_t __riscv_vctz_tu (vuint8m2_t vd, vuint8m2_t vs2, size_t vl); +vuint8m4_t __riscv_vctz_tu (vuint8m4_t vd, vuint8m4_t vs2, size_t vl); +vuint8m8_t __riscv_vctz_tu (vuint8m8_t vd, vuint8m8_t vs2, size_t vl); +vuint16mf4_t __riscv_vctz_tu (vuint16mf4_t vd, vuint16mf4_t vs2, size_t vl); +vuint16mf2_t __riscv_vctz_tu (vuint16mf2_t vd, vuint16mf2_t vs2, size_t vl); +vuint16m1_t __riscv_vctz_tu (vuint16m1_t vd, vuint16m1_t vs2, size_t vl); +vuint16m2_t __riscv_vctz_tu (vuint16m2_t vd, vuint16m2_t vs2, size_t vl); +vuint16m4_t __riscv_vctz_tu (vuint16m4_t vd, vuint16m4_t vs2, size_t vl); +vuint16m8_t __riscv_vctz_tu (vuint16m8_t vd, vuint16m8_t vs2, size_t vl); +vuint32mf2_t __riscv_vctz_tu (vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vctz_tu (vuint32m1_t vd, vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vctz_tu (vuint32m2_t vd, vuint32m2_t vs2, size_t vl); +vuint32m4_t 
__riscv_vctz_tu (vuint32m4_t vd, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vctz_tu (vuint32m8_t vd, vuint32m8_t vs2, size_t vl); +vuint64m1_t __riscv_vctz_tu (vuint64m1_t vd, vuint64m1_t vs2, size_t vl); +vuint64m2_t __riscv_vctz_tu (vuint64m2_t vd, vuint64m2_t vs2, size_t vl); +vuint64m4_t __riscv_vctz_tu (vuint64m4_t vd, vuint64m4_t vs2, size_t vl); +vuint64m8_t __riscv_vctz_tu (vuint64m8_t vd, vuint64m8_t vs2, size_t vl); +// masked functions +vuint8mf8_t __riscv_vclz_tum (vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, size_t vl); +vuint8mf4_t __riscv_vclz_tum (vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, size_t vl); +vuint8mf2_t __riscv_vclz_tum (vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, size_t vl); +vuint8m1_t __riscv_vclz_tum (vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, size_t vl); +vuint8m2_t __riscv_vclz_tum (vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, size_t vl); +vuint8m4_t __riscv_vclz_tum (vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, size_t vl); +vuint8m8_t __riscv_vclz_tum (vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, size_t vl); +vuint16mf4_t __riscv_vclz_tum (vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, size_t vl); +vuint16mf2_t __riscv_vclz_tum (vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, size_t vl); +vuint16m1_t __riscv_vclz_tum (vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, size_t vl); +vuint16m2_t __riscv_vclz_tum (vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, size_t vl); +vuint16m4_t __riscv_vclz_tum (vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, size_t vl); +vuint16m8_t __riscv_vclz_tum (vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, size_t vl); +vuint32mf2_t __riscv_vclz_tum (vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vclz_tum (vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vclz_tum (vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vclz_tum (vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vclz_tum (vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, size_t vl); +vuint64m1_t __riscv_vclz_tum (vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, size_t vl); +vuint64m2_t __riscv_vclz_tum (vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, size_t vl); +vuint64m4_t __riscv_vclz_tum (vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, size_t vl); +vuint64m8_t __riscv_vclz_tum (vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, size_t vl); +vuint8mf8_t __riscv_vctz_tum (vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, size_t vl); +vuint8mf4_t __riscv_vctz_tum (vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, size_t vl); +vuint8mf2_t __riscv_vctz_tum (vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, size_t vl); +vuint8m1_t __riscv_vctz_tum (vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, size_t vl); +vuint8m2_t __riscv_vctz_tum (vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, size_t vl); +vuint8m4_t __riscv_vctz_tum (vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, size_t vl); +vuint8m8_t __riscv_vctz_tum (vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, size_t vl); +vuint16mf4_t __riscv_vctz_tum (vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, size_t vl); +vuint16mf2_t __riscv_vctz_tum (vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, size_t vl); +vuint16m1_t __riscv_vctz_tum (vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, size_t vl); +vuint16m2_t __riscv_vctz_tum (vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, size_t vl); +vuint16m4_t __riscv_vctz_tum (vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, size_t vl); +vuint16m8_t __riscv_vctz_tum (vbool2_t vm, vuint16m8_t vd, 
vuint16m8_t vs2, size_t vl); +vuint32mf2_t __riscv_vctz_tum (vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vctz_tum (vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vctz_tum (vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vctz_tum (vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vctz_tum (vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, size_t vl); +vuint64m1_t __riscv_vctz_tum (vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, size_t vl); +vuint64m2_t __riscv_vctz_tum (vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, size_t vl); +vuint64m4_t __riscv_vctz_tum (vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, size_t vl); +vuint64m8_t __riscv_vctz_tum (vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, size_t vl); +// masked functions +vuint8mf8_t __riscv_vclz_tumu (vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, size_t vl); +vuint8mf4_t __riscv_vclz_tumu (vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, size_t vl); +vuint8mf2_t __riscv_vclz_tumu (vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, size_t vl); +vuint8m1_t __riscv_vclz_tumu (vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, size_t vl); +vuint8m2_t __riscv_vclz_tumu (vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, size_t vl); +vuint8m4_t __riscv_vclz_tumu (vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, size_t vl); +vuint8m8_t __riscv_vclz_tumu (vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, size_t vl); +vuint16mf4_t __riscv_vclz_tumu (vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, size_t vl); +vuint16mf2_t __riscv_vclz_tumu (vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, size_t vl); +vuint16m1_t __riscv_vclz_tumu (vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, size_t vl); +vuint16m2_t __riscv_vclz_tumu (vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, size_t vl); +vuint16m4_t __riscv_vclz_tumu (vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, size_t vl); +vuint16m8_t __riscv_vclz_tumu (vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, size_t vl); +vuint32mf2_t __riscv_vclz_tumu (vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vclz_tumu (vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vclz_tumu (vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vclz_tumu (vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vclz_tumu (vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, size_t vl); +vuint64m1_t __riscv_vclz_tumu (vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, size_t vl); +vuint64m2_t __riscv_vclz_tumu (vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, size_t vl); +vuint64m4_t __riscv_vclz_tumu (vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, size_t vl); +vuint64m8_t __riscv_vclz_tumu (vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, size_t vl); +vuint8mf8_t __riscv_vctz_tumu (vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, size_t vl); +vuint8mf4_t __riscv_vctz_tumu (vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, size_t vl); +vuint8mf2_t __riscv_vctz_tumu (vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, size_t vl); +vuint8m1_t __riscv_vctz_tumu (vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, size_t vl); +vuint8m2_t __riscv_vctz_tumu (vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, size_t vl); +vuint8m4_t __riscv_vctz_tumu (vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, size_t vl); +vuint8m8_t __riscv_vctz_tumu (vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, size_t vl); +vuint16mf4_t __riscv_vctz_tumu (vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, 
size_t vl); +vuint16mf2_t __riscv_vctz_tumu (vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, size_t vl); +vuint16m1_t __riscv_vctz_tumu (vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, size_t vl); +vuint16m2_t __riscv_vctz_tumu (vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, size_t vl); +vuint16m4_t __riscv_vctz_tumu (vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, size_t vl); +vuint16m8_t __riscv_vctz_tumu (vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, size_t vl); +vuint32mf2_t __riscv_vctz_tumu (vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vctz_tumu (vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vctz_tumu (vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vctz_tumu (vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vctz_tumu (vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, size_t vl); +vuint64m1_t __riscv_vctz_tumu (vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, size_t vl); +vuint64m2_t __riscv_vctz_tumu (vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, size_t vl); +vuint64m4_t __riscv_vctz_tumu (vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, size_t vl); +vuint64m8_t __riscv_vctz_tumu (vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, size_t vl); +// masked functions +vuint8mf8_t __riscv_vclz_mu (vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, size_t vl); +vuint8mf4_t __riscv_vclz_mu (vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, size_t vl); +vuint8mf2_t __riscv_vclz_mu (vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, size_t vl); +vuint8m1_t __riscv_vclz_mu (vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, size_t vl); +vuint8m2_t __riscv_vclz_mu (vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, size_t vl); +vuint8m4_t __riscv_vclz_mu (vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, size_t vl); +vuint8m8_t __riscv_vclz_mu (vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, size_t vl); +vuint16mf4_t __riscv_vclz_mu (vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, size_t vl); +vuint16mf2_t __riscv_vclz_mu (vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, size_t vl); +vuint16m1_t __riscv_vclz_mu (vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, size_t vl); +vuint16m2_t __riscv_vclz_mu (vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, size_t vl); +vuint16m4_t __riscv_vclz_mu (vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, size_t vl); +vuint16m8_t __riscv_vclz_mu (vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, size_t vl); +vuint32mf2_t __riscv_vclz_mu (vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vclz_mu (vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vclz_mu (vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vclz_mu (vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vclz_mu (vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, size_t vl); +vuint64m1_t __riscv_vclz_mu (vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, size_t vl); +vuint64m2_t __riscv_vclz_mu (vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, size_t vl); +vuint64m4_t __riscv_vclz_mu (vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, size_t vl); +vuint64m8_t __riscv_vclz_mu (vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, size_t vl); +vuint8mf8_t __riscv_vctz_mu (vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, size_t vl); +vuint8mf4_t __riscv_vctz_mu (vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, size_t vl); +vuint8mf2_t __riscv_vctz_mu (vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, size_t vl); +vuint8m1_t __riscv_vctz_mu 
(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, size_t vl); +vuint8m2_t __riscv_vctz_mu (vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, size_t vl); +vuint8m4_t __riscv_vctz_mu (vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, size_t vl); +vuint8m8_t __riscv_vctz_mu (vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, size_t vl); +vuint16mf4_t __riscv_vctz_mu (vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, size_t vl); +vuint16mf2_t __riscv_vctz_mu (vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, size_t vl); +vuint16m1_t __riscv_vctz_mu (vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, size_t vl); +vuint16m2_t __riscv_vctz_mu (vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, size_t vl); +vuint16m4_t __riscv_vctz_mu (vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, size_t vl); +vuint16m8_t __riscv_vctz_mu (vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, size_t vl); +vuint32mf2_t __riscv_vctz_mu (vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vctz_mu (vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vctz_mu (vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vctz_mu (vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vctz_mu (vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, size_t vl); +vuint64m1_t __riscv_vctz_mu (vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, size_t vl); +vuint64m2_t __riscv_vctz_mu (vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, size_t vl); +vuint64m4_t __riscv_vctz_mu (vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, size_t vl); +vuint64m8_t __riscv_vctz_mu (vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, size_t vl); +---- + +[[policy-variant-overloaded]] +==== Vector Basic Bit-manipulation - Vector Population Count + +[,c] +---- +vuint8mf8_t __riscv_vcpop_tu (vuint8mf8_t vd, vuint8mf8_t vs2, size_t vl); +vuint8mf4_t __riscv_vcpop_tu (vuint8mf4_t vd, vuint8mf4_t vs2, size_t vl); +vuint8mf2_t __riscv_vcpop_tu (vuint8mf2_t vd, vuint8mf2_t vs2, size_t vl); +vuint8m1_t __riscv_vcpop_tu (vuint8m1_t vd, vuint8m1_t vs2, size_t vl); +vuint8m2_t __riscv_vcpop_tu (vuint8m2_t vd, vuint8m2_t vs2, size_t vl); +vuint8m4_t __riscv_vcpop_tu (vuint8m4_t vd, vuint8m4_t vs2, size_t vl); +vuint8m8_t __riscv_vcpop_tu (vuint8m8_t vd, vuint8m8_t vs2, size_t vl); +vuint16mf4_t __riscv_vcpop_tu (vuint16mf4_t vd, vuint16mf4_t vs2, size_t vl); +vuint16mf2_t __riscv_vcpop_tu (vuint16mf2_t vd, vuint16mf2_t vs2, size_t vl); +vuint16m1_t __riscv_vcpop_tu (vuint16m1_t vd, vuint16m1_t vs2, size_t vl); +vuint16m2_t __riscv_vcpop_tu (vuint16m2_t vd, vuint16m2_t vs2, size_t vl); +vuint16m4_t __riscv_vcpop_tu (vuint16m4_t vd, vuint16m4_t vs2, size_t vl); +vuint16m8_t __riscv_vcpop_tu (vuint16m8_t vd, vuint16m8_t vs2, size_t vl); +vuint32mf2_t __riscv_vcpop_tu (vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vcpop_tu (vuint32m1_t vd, vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vcpop_tu (vuint32m2_t vd, vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vcpop_tu (vuint32m4_t vd, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vcpop_tu (vuint32m8_t vd, vuint32m8_t vs2, size_t vl); +vuint64m1_t __riscv_vcpop_tu (vuint64m1_t vd, vuint64m1_t vs2, size_t vl); +vuint64m2_t __riscv_vcpop_tu (vuint64m2_t vd, vuint64m2_t vs2, size_t vl); +vuint64m4_t __riscv_vcpop_tu (vuint64m4_t vd, vuint64m4_t vs2, size_t vl); +vuint64m8_t __riscv_vcpop_tu (vuint64m8_t vd, vuint64m8_t vs2, size_t vl); +// masked functions +vuint8mf8_t __riscv_vcpop_tum (vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, size_t vl); 
+vuint8mf4_t __riscv_vcpop_tum (vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, size_t vl); +vuint8mf2_t __riscv_vcpop_tum (vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, size_t vl); +vuint8m1_t __riscv_vcpop_tum (vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, size_t vl); +vuint8m2_t __riscv_vcpop_tum (vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, size_t vl); +vuint8m4_t __riscv_vcpop_tum (vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, size_t vl); +vuint8m8_t __riscv_vcpop_tum (vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, size_t vl); +vuint16mf4_t __riscv_vcpop_tum (vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, size_t vl); +vuint16mf2_t __riscv_vcpop_tum (vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, size_t vl); +vuint16m1_t __riscv_vcpop_tum (vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, size_t vl); +vuint16m2_t __riscv_vcpop_tum (vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, size_t vl); +vuint16m4_t __riscv_vcpop_tum (vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, size_t vl); +vuint16m8_t __riscv_vcpop_tum (vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, size_t vl); +vuint32mf2_t __riscv_vcpop_tum (vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vcpop_tum (vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vcpop_tum (vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vcpop_tum (vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vcpop_tum (vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, size_t vl); +vuint64m1_t __riscv_vcpop_tum (vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, size_t vl); +vuint64m2_t __riscv_vcpop_tum (vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, size_t vl); +vuint64m4_t __riscv_vcpop_tum (vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, size_t vl); +vuint64m8_t __riscv_vcpop_tum (vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, size_t vl); +// masked functions +vuint8mf8_t __riscv_vcpop_tumu (vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, size_t vl); +vuint8mf4_t __riscv_vcpop_tumu (vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, size_t vl); +vuint8mf2_t __riscv_vcpop_tumu (vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, size_t vl); +vuint8m1_t __riscv_vcpop_tumu (vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, size_t vl); +vuint8m2_t __riscv_vcpop_tumu (vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, size_t vl); +vuint8m4_t __riscv_vcpop_tumu (vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, size_t vl); +vuint8m8_t __riscv_vcpop_tumu (vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, size_t vl); +vuint16mf4_t __riscv_vcpop_tumu (vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, size_t vl); +vuint16mf2_t __riscv_vcpop_tumu (vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, size_t vl); +vuint16m1_t __riscv_vcpop_tumu (vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, size_t vl); +vuint16m2_t __riscv_vcpop_tumu (vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, size_t vl); +vuint16m4_t __riscv_vcpop_tumu (vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, size_t vl); +vuint16m8_t __riscv_vcpop_tumu (vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, size_t vl); +vuint32mf2_t __riscv_vcpop_tumu (vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vcpop_tumu (vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vcpop_tumu (vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vcpop_tumu (vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vcpop_tumu (vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, 
size_t vl); +vuint64m1_t __riscv_vcpop_tumu (vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, size_t vl); +vuint64m2_t __riscv_vcpop_tumu (vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, size_t vl); +vuint64m4_t __riscv_vcpop_tumu (vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, size_t vl); +vuint64m8_t __riscv_vcpop_tumu (vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, size_t vl); +// masked functions +vuint8mf8_t __riscv_vcpop_mu (vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, size_t vl); +vuint8mf4_t __riscv_vcpop_mu (vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, size_t vl); +vuint8mf2_t __riscv_vcpop_mu (vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, size_t vl); +vuint8m1_t __riscv_vcpop_mu (vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, size_t vl); +vuint8m2_t __riscv_vcpop_mu (vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, size_t vl); +vuint8m4_t __riscv_vcpop_mu (vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, size_t vl); +vuint8m8_t __riscv_vcpop_mu (vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, size_t vl); +vuint16mf4_t __riscv_vcpop_mu (vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, size_t vl); +vuint16mf2_t __riscv_vcpop_mu (vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, size_t vl); +vuint16m1_t __riscv_vcpop_mu (vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, size_t vl); +vuint16m2_t __riscv_vcpop_mu (vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, size_t vl); +vuint16m4_t __riscv_vcpop_mu (vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, size_t vl); +vuint16m8_t __riscv_vcpop_mu (vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, size_t vl); +vuint32mf2_t __riscv_vcpop_mu (vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vcpop_mu (vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vcpop_mu (vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vcpop_mu (vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vcpop_mu (vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, size_t vl); +vuint64m1_t __riscv_vcpop_mu (vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, size_t vl); +vuint64m2_t __riscv_vcpop_mu (vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, size_t vl); +vuint64m4_t __riscv_vcpop_mu (vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, size_t vl); +vuint64m8_t __riscv_vcpop_mu (vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, size_t vl); +---- + +[[policy-variant-overloaded]] +==== Vector Bit-manipulation used in Cryptography - Rotate + +[,c] +---- +vuint8mf8_t __riscv_vrol_tu (vuint8mf8_t vd, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl); +vuint8mf8_t __riscv_vrol_tu (vuint8mf8_t vd, vuint8mf8_t vs2, size_t rs1, size_t vl); +vuint8mf4_t __riscv_vrol_tu (vuint8mf4_t vd, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl); +vuint8mf4_t __riscv_vrol_tu (vuint8mf4_t vd, vuint8mf4_t vs2, size_t rs1, size_t vl); +vuint8mf2_t __riscv_vrol_tu (vuint8mf2_t vd, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl); +vuint8mf2_t __riscv_vrol_tu (vuint8mf2_t vd, vuint8mf2_t vs2, size_t rs1, size_t vl); +vuint8m1_t __riscv_vrol_tu (vuint8m1_t vd, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl); +vuint8m1_t __riscv_vrol_tu (vuint8m1_t vd, vuint8m1_t vs2, size_t rs1, size_t vl); +vuint8m2_t __riscv_vrol_tu (vuint8m2_t vd, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl); +vuint8m2_t __riscv_vrol_tu (vuint8m2_t vd, vuint8m2_t vs2, size_t rs1, size_t vl); +vuint8m4_t __riscv_vrol_tu (vuint8m4_t vd, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl); +vuint8m4_t __riscv_vrol_tu (vuint8m4_t vd, vuint8m4_t vs2, size_t rs1, size_t vl); +vuint8m8_t 
__riscv_vrol_tu (vuint8m8_t vd, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl); +vuint8m8_t __riscv_vrol_tu (vuint8m8_t vd, vuint8m8_t vs2, size_t rs1, size_t vl); +vuint16mf4_t __riscv_vrol_tu (vuint16mf4_t vd, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl); +vuint16mf4_t __riscv_vrol_tu (vuint16mf4_t vd, vuint16mf4_t vs2, size_t rs1, size_t vl); +vuint16mf2_t __riscv_vrol_tu (vuint16mf2_t vd, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl); +vuint16mf2_t __riscv_vrol_tu (vuint16mf2_t vd, vuint16mf2_t vs2, size_t rs1, size_t vl); +vuint16m1_t __riscv_vrol_tu (vuint16m1_t vd, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl); +vuint16m1_t __riscv_vrol_tu (vuint16m1_t vd, vuint16m1_t vs2, size_t rs1, size_t vl); +vuint16m2_t __riscv_vrol_tu (vuint16m2_t vd, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl); +vuint16m2_t __riscv_vrol_tu (vuint16m2_t vd, vuint16m2_t vs2, size_t rs1, size_t vl); +vuint16m4_t __riscv_vrol_tu (vuint16m4_t vd, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl); +vuint16m4_t __riscv_vrol_tu (vuint16m4_t vd, vuint16m4_t vs2, size_t rs1, size_t vl); +vuint16m8_t __riscv_vrol_tu (vuint16m8_t vd, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl); +vuint16m8_t __riscv_vrol_tu (vuint16m8_t vd, vuint16m8_t vs2, size_t rs1, size_t vl); +vuint32mf2_t __riscv_vrol_tu (vuint32mf2_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl); +vuint32mf2_t __riscv_vrol_tu (vuint32mf2_t vd, vuint32mf2_t vs2, size_t rs1, size_t vl); +vuint32m1_t __riscv_vrol_tu (vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl); +vuint32m1_t __riscv_vrol_tu (vuint32m1_t vd, vuint32m1_t vs2, size_t rs1, size_t vl); +vuint32m2_t __riscv_vrol_tu (vuint32m2_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl); +vuint32m2_t __riscv_vrol_tu (vuint32m2_t vd, vuint32m2_t vs2, size_t rs1, size_t vl); +vuint32m4_t __riscv_vrol_tu (vuint32m4_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl); +vuint32m4_t __riscv_vrol_tu (vuint32m4_t vd, vuint32m4_t vs2, size_t rs1, size_t vl); +vuint32m8_t __riscv_vrol_tu (vuint32m8_t vd, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl); +vuint32m8_t __riscv_vrol_tu (vuint32m8_t vd, vuint32m8_t vs2, size_t rs1, size_t vl); +vuint64m1_t __riscv_vrol_tu (vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl); +vuint64m1_t __riscv_vrol_tu (vuint64m1_t vd, vuint64m1_t vs2, size_t rs1, size_t vl); +vuint64m2_t __riscv_vrol_tu (vuint64m2_t vd, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl); +vuint64m2_t __riscv_vrol_tu (vuint64m2_t vd, vuint64m2_t vs2, size_t rs1, size_t vl); +vuint64m4_t __riscv_vrol_tu (vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl); +vuint64m4_t __riscv_vrol_tu (vuint64m4_t vd, vuint64m4_t vs2, size_t rs1, size_t vl); +vuint64m8_t __riscv_vrol_tu (vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl); +vuint64m8_t __riscv_vrol_tu (vuint64m8_t vd, vuint64m8_t vs2, size_t rs1, size_t vl); +vuint8mf8_t __riscv_vror_tu (vuint8mf8_t vd, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl); +vuint8mf8_t __riscv_vror_tu (vuint8mf8_t vd, vuint8mf8_t vs2, size_t rs1, size_t vl); +vuint8mf4_t __riscv_vror_tu (vuint8mf4_t vd, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl); +vuint8mf4_t __riscv_vror_tu (vuint8mf4_t vd, vuint8mf4_t vs2, size_t rs1, size_t vl); +vuint8mf2_t __riscv_vror_tu (vuint8mf2_t vd, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl); +vuint8mf2_t __riscv_vror_tu (vuint8mf2_t vd, vuint8mf2_t vs2, size_t rs1, size_t vl); +vuint8m1_t __riscv_vror_tu (vuint8m1_t vd, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl); +vuint8m1_t __riscv_vror_tu (vuint8m1_t vd, vuint8m1_t vs2, 
size_t rs1, size_t vl); +vuint8m2_t __riscv_vror_tu (vuint8m2_t vd, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl); +vuint8m2_t __riscv_vror_tu (vuint8m2_t vd, vuint8m2_t vs2, size_t rs1, size_t vl); +vuint8m4_t __riscv_vror_tu (vuint8m4_t vd, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl); +vuint8m4_t __riscv_vror_tu (vuint8m4_t vd, vuint8m4_t vs2, size_t rs1, size_t vl); +vuint8m8_t __riscv_vror_tu (vuint8m8_t vd, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl); +vuint8m8_t __riscv_vror_tu (vuint8m8_t vd, vuint8m8_t vs2, size_t rs1, size_t vl); +vuint16mf4_t __riscv_vror_tu (vuint16mf4_t vd, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl); +vuint16mf4_t __riscv_vror_tu (vuint16mf4_t vd, vuint16mf4_t vs2, size_t rs1, size_t vl); +vuint16mf2_t __riscv_vror_tu (vuint16mf2_t vd, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl); +vuint16mf2_t __riscv_vror_tu (vuint16mf2_t vd, vuint16mf2_t vs2, size_t rs1, size_t vl); +vuint16m1_t __riscv_vror_tu (vuint16m1_t vd, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl); +vuint16m1_t __riscv_vror_tu (vuint16m1_t vd, vuint16m1_t vs2, size_t rs1, size_t vl); +vuint16m2_t __riscv_vror_tu (vuint16m2_t vd, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl); +vuint16m2_t __riscv_vror_tu (vuint16m2_t vd, vuint16m2_t vs2, size_t rs1, size_t vl); +vuint16m4_t __riscv_vror_tu (vuint16m4_t vd, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl); +vuint16m4_t __riscv_vror_tu (vuint16m4_t vd, vuint16m4_t vs2, size_t rs1, size_t vl); +vuint16m8_t __riscv_vror_tu (vuint16m8_t vd, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl); +vuint16m8_t __riscv_vror_tu (vuint16m8_t vd, vuint16m8_t vs2, size_t rs1, size_t vl); +vuint32mf2_t __riscv_vror_tu (vuint32mf2_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl); +vuint32mf2_t __riscv_vror_tu (vuint32mf2_t vd, vuint32mf2_t vs2, size_t rs1, size_t vl); +vuint32m1_t __riscv_vror_tu (vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl); +vuint32m1_t __riscv_vror_tu (vuint32m1_t vd, vuint32m1_t vs2, size_t rs1, size_t vl); +vuint32m2_t __riscv_vror_tu (vuint32m2_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl); +vuint32m2_t __riscv_vror_tu (vuint32m2_t vd, vuint32m2_t vs2, size_t rs1, size_t vl); +vuint32m4_t __riscv_vror_tu (vuint32m4_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl); +vuint32m4_t __riscv_vror_tu (vuint32m4_t vd, vuint32m4_t vs2, size_t rs1, size_t vl); +vuint32m8_t __riscv_vror_tu (vuint32m8_t vd, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl); +vuint32m8_t __riscv_vror_tu (vuint32m8_t vd, vuint32m8_t vs2, size_t rs1, size_t vl); +vuint64m1_t __riscv_vror_tu (vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl); +vuint64m1_t __riscv_vror_tu (vuint64m1_t vd, vuint64m1_t vs2, size_t rs1, size_t vl); +vuint64m2_t __riscv_vror_tu (vuint64m2_t vd, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl); +vuint64m2_t __riscv_vror_tu (vuint64m2_t vd, vuint64m2_t vs2, size_t rs1, size_t vl); +vuint64m4_t __riscv_vror_tu (vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl); +vuint64m4_t __riscv_vror_tu (vuint64m4_t vd, vuint64m4_t vs2, size_t rs1, size_t vl); +vuint64m8_t __riscv_vror_tu (vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl); +vuint64m8_t __riscv_vror_tu (vuint64m8_t vd, vuint64m8_t vs2, size_t rs1, size_t vl); +// masked functions +vuint8mf8_t __riscv_vrol_tum (vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl); +vuint8mf8_t __riscv_vrol_tum (vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, size_t rs1, size_t vl); +vuint8mf4_t __riscv_vrol_tum (vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, 
vuint8mf4_t vs1, size_t vl); +vuint8mf4_t __riscv_vrol_tum (vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, size_t rs1, size_t vl); +vuint8mf2_t __riscv_vrol_tum (vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl); +vuint8mf2_t __riscv_vrol_tum (vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, size_t rs1, size_t vl); +vuint8m1_t __riscv_vrol_tum (vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl); +vuint8m1_t __riscv_vrol_tum (vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, size_t rs1, size_t vl); +vuint8m2_t __riscv_vrol_tum (vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl); +vuint8m2_t __riscv_vrol_tum (vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, size_t rs1, size_t vl); +vuint8m4_t __riscv_vrol_tum (vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl); +vuint8m4_t __riscv_vrol_tum (vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, size_t rs1, size_t vl); +vuint8m8_t __riscv_vrol_tum (vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl); +vuint8m8_t __riscv_vrol_tum (vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, size_t rs1, size_t vl); +vuint16mf4_t __riscv_vrol_tum (vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl); +vuint16mf4_t __riscv_vrol_tum (vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, size_t rs1, size_t vl); +vuint16mf2_t __riscv_vrol_tum (vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl); +vuint16mf2_t __riscv_vrol_tum (vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, size_t rs1, size_t vl); +vuint16m1_t __riscv_vrol_tum (vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl); +vuint16m1_t __riscv_vrol_tum (vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, size_t rs1, size_t vl); +vuint16m2_t __riscv_vrol_tum (vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl); +vuint16m2_t __riscv_vrol_tum (vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, size_t rs1, size_t vl); +vuint16m4_t __riscv_vrol_tum (vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl); +vuint16m4_t __riscv_vrol_tum (vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, size_t rs1, size_t vl); +vuint16m8_t __riscv_vrol_tum (vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl); +vuint16m8_t __riscv_vrol_tum (vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, size_t rs1, size_t vl); +vuint32mf2_t __riscv_vrol_tum (vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl); +vuint32mf2_t __riscv_vrol_tum (vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, size_t rs1, size_t vl); +vuint32m1_t __riscv_vrol_tum (vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl); +vuint32m1_t __riscv_vrol_tum (vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, size_t rs1, size_t vl); +vuint32m2_t __riscv_vrol_tum (vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl); +vuint32m2_t __riscv_vrol_tum (vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, size_t rs1, size_t vl); +vuint32m4_t __riscv_vrol_tum (vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl); +vuint32m4_t __riscv_vrol_tum (vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, size_t rs1, size_t vl); +vuint32m8_t __riscv_vrol_tum (vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl); +vuint32m8_t __riscv_vrol_tum (vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, size_t rs1, size_t vl); +vuint64m1_t __riscv_vrol_tum (vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, size_t 
vl); +vuint64m1_t __riscv_vrol_tum (vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, size_t rs1, size_t vl); +vuint64m2_t __riscv_vrol_tum (vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl); +vuint64m2_t __riscv_vrol_tum (vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, size_t rs1, size_t vl); +vuint64m4_t __riscv_vrol_tum (vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl); +vuint64m4_t __riscv_vrol_tum (vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, size_t rs1, size_t vl); +vuint64m8_t __riscv_vrol_tum (vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl); +vuint64m8_t __riscv_vrol_tum (vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, size_t rs1, size_t vl); +vuint8mf8_t __riscv_vror_tum (vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl); +vuint8mf8_t __riscv_vror_tum (vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, size_t rs1, size_t vl); +vuint8mf4_t __riscv_vror_tum (vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl); +vuint8mf4_t __riscv_vror_tum (vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, size_t rs1, size_t vl); +vuint8mf2_t __riscv_vror_tum (vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl); +vuint8mf2_t __riscv_vror_tum (vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, size_t rs1, size_t vl); +vuint8m1_t __riscv_vror_tum (vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl); +vuint8m1_t __riscv_vror_tum (vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, size_t rs1, size_t vl); +vuint8m2_t __riscv_vror_tum (vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl); +vuint8m2_t __riscv_vror_tum (vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, size_t rs1, size_t vl); +vuint8m4_t __riscv_vror_tum (vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl); +vuint8m4_t __riscv_vror_tum (vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, size_t rs1, size_t vl); +vuint8m8_t __riscv_vror_tum (vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl); +vuint8m8_t __riscv_vror_tum (vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, size_t rs1, size_t vl); +vuint16mf4_t __riscv_vror_tum (vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl); +vuint16mf4_t __riscv_vror_tum (vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, size_t rs1, size_t vl); +vuint16mf2_t __riscv_vror_tum (vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl); +vuint16mf2_t __riscv_vror_tum (vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, size_t rs1, size_t vl); +vuint16m1_t __riscv_vror_tum (vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl); +vuint16m1_t __riscv_vror_tum (vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, size_t rs1, size_t vl); +vuint16m2_t __riscv_vror_tum (vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl); +vuint16m2_t __riscv_vror_tum (vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, size_t rs1, size_t vl); +vuint16m4_t __riscv_vror_tum (vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl); +vuint16m4_t __riscv_vror_tum (vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, size_t rs1, size_t vl); +vuint16m8_t __riscv_vror_tum (vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl); +vuint16m8_t __riscv_vror_tum (vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, size_t rs1, size_t vl); +vuint32mf2_t __riscv_vror_tum (vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl); +vuint32mf2_t 
__riscv_vror_tum (vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, size_t rs1, size_t vl); +vuint32m1_t __riscv_vror_tum (vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl); +vuint32m1_t __riscv_vror_tum (vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, size_t rs1, size_t vl); +vuint32m2_t __riscv_vror_tum (vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl); +vuint32m2_t __riscv_vror_tum (vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, size_t rs1, size_t vl); +vuint32m4_t __riscv_vror_tum (vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl); +vuint32m4_t __riscv_vror_tum (vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, size_t rs1, size_t vl); +vuint32m8_t __riscv_vror_tum (vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl); +vuint32m8_t __riscv_vror_tum (vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, size_t rs1, size_t vl); +vuint64m1_t __riscv_vror_tum (vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl); +vuint64m1_t __riscv_vror_tum (vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, size_t rs1, size_t vl); +vuint64m2_t __riscv_vror_tum (vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl); +vuint64m2_t __riscv_vror_tum (vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, size_t rs1, size_t vl); +vuint64m4_t __riscv_vror_tum (vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl); +vuint64m4_t __riscv_vror_tum (vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, size_t rs1, size_t vl); +vuint64m8_t __riscv_vror_tum (vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl); +vuint64m8_t __riscv_vror_tum (vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, size_t rs1, size_t vl); +// masked functions +vuint8mf8_t __riscv_vrol_tumu (vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl); +vuint8mf8_t __riscv_vrol_tumu (vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, size_t rs1, size_t vl); +vuint8mf4_t __riscv_vrol_tumu (vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl); +vuint8mf4_t __riscv_vrol_tumu (vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, size_t rs1, size_t vl); +vuint8mf2_t __riscv_vrol_tumu (vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl); +vuint8mf2_t __riscv_vrol_tumu (vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, size_t rs1, size_t vl); +vuint8m1_t __riscv_vrol_tumu (vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl); +vuint8m1_t __riscv_vrol_tumu (vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, size_t rs1, size_t vl); +vuint8m2_t __riscv_vrol_tumu (vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl); +vuint8m2_t __riscv_vrol_tumu (vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, size_t rs1, size_t vl); +vuint8m4_t __riscv_vrol_tumu (vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl); +vuint8m4_t __riscv_vrol_tumu (vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, size_t rs1, size_t vl); +vuint8m8_t __riscv_vrol_tumu (vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl); +vuint8m8_t __riscv_vrol_tumu (vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, size_t rs1, size_t vl); +vuint16mf4_t __riscv_vrol_tumu (vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl); +vuint16mf4_t __riscv_vrol_tumu (vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, size_t rs1, size_t vl); +vuint16mf2_t __riscv_vrol_tumu (vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl); 
+vuint16mf2_t __riscv_vrol_tumu (vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, size_t rs1, size_t vl); +vuint16m1_t __riscv_vrol_tumu (vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl); +vuint16m1_t __riscv_vrol_tumu (vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, size_t rs1, size_t vl); +vuint16m2_t __riscv_vrol_tumu (vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl); +vuint16m2_t __riscv_vrol_tumu (vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, size_t rs1, size_t vl); +vuint16m4_t __riscv_vrol_tumu (vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl); +vuint16m4_t __riscv_vrol_tumu (vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, size_t rs1, size_t vl); +vuint16m8_t __riscv_vrol_tumu (vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl); +vuint16m8_t __riscv_vrol_tumu (vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, size_t rs1, size_t vl); +vuint32mf2_t __riscv_vrol_tumu (vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl); +vuint32mf2_t __riscv_vrol_tumu (vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, size_t rs1, size_t vl); +vuint32m1_t __riscv_vrol_tumu (vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl); +vuint32m1_t __riscv_vrol_tumu (vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, size_t rs1, size_t vl); +vuint32m2_t __riscv_vrol_tumu (vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl); +vuint32m2_t __riscv_vrol_tumu (vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, size_t rs1, size_t vl); +vuint32m4_t __riscv_vrol_tumu (vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl); +vuint32m4_t __riscv_vrol_tumu (vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, size_t rs1, size_t vl); +vuint32m8_t __riscv_vrol_tumu (vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl); +vuint32m8_t __riscv_vrol_tumu (vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, size_t rs1, size_t vl); +vuint64m1_t __riscv_vrol_tumu (vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl); +vuint64m1_t __riscv_vrol_tumu (vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, size_t rs1, size_t vl); +vuint64m2_t __riscv_vrol_tumu (vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl); +vuint64m2_t __riscv_vrol_tumu (vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, size_t rs1, size_t vl); +vuint64m4_t __riscv_vrol_tumu (vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl); +vuint64m4_t __riscv_vrol_tumu (vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, size_t rs1, size_t vl); +vuint64m8_t __riscv_vrol_tumu (vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl); +vuint64m8_t __riscv_vrol_tumu (vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, size_t rs1, size_t vl); +vuint8mf8_t __riscv_vror_tumu (vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl); +vuint8mf8_t __riscv_vror_tumu (vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, size_t rs1, size_t vl); +vuint8mf4_t __riscv_vror_tumu (vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl); +vuint8mf4_t __riscv_vror_tumu (vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, size_t rs1, size_t vl); +vuint8mf2_t __riscv_vror_tumu (vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl); +vuint8mf2_t __riscv_vror_tumu (vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, size_t rs1, size_t vl); +vuint8m1_t __riscv_vror_tumu (vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, 
vuint8m1_t vs1, size_t vl); +vuint8m1_t __riscv_vror_tumu (vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, size_t rs1, size_t vl); +vuint8m2_t __riscv_vror_tumu (vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl); +vuint8m2_t __riscv_vror_tumu (vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, size_t rs1, size_t vl); +vuint8m4_t __riscv_vror_tumu (vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl); +vuint8m4_t __riscv_vror_tumu (vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, size_t rs1, size_t vl); +vuint8m8_t __riscv_vror_tumu (vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl); +vuint8m8_t __riscv_vror_tumu (vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, size_t rs1, size_t vl); +vuint16mf4_t __riscv_vror_tumu (vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl); +vuint16mf4_t __riscv_vror_tumu (vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, size_t rs1, size_t vl); +vuint16mf2_t __riscv_vror_tumu (vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl); +vuint16mf2_t __riscv_vror_tumu (vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, size_t rs1, size_t vl); +vuint16m1_t __riscv_vror_tumu (vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl); +vuint16m1_t __riscv_vror_tumu (vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, size_t rs1, size_t vl); +vuint16m2_t __riscv_vror_tumu (vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl); +vuint16m2_t __riscv_vror_tumu (vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, size_t rs1, size_t vl); +vuint16m4_t __riscv_vror_tumu (vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl); +vuint16m4_t __riscv_vror_tumu (vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, size_t rs1, size_t vl); +vuint16m8_t __riscv_vror_tumu (vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl); +vuint16m8_t __riscv_vror_tumu (vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, size_t rs1, size_t vl); +vuint32mf2_t __riscv_vror_tumu (vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl); +vuint32mf2_t __riscv_vror_tumu (vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, size_t rs1, size_t vl); +vuint32m1_t __riscv_vror_tumu (vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl); +vuint32m1_t __riscv_vror_tumu (vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, size_t rs1, size_t vl); +vuint32m2_t __riscv_vror_tumu (vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl); +vuint32m2_t __riscv_vror_tumu (vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, size_t rs1, size_t vl); +vuint32m4_t __riscv_vror_tumu (vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl); +vuint32m4_t __riscv_vror_tumu (vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, size_t rs1, size_t vl); +vuint32m8_t __riscv_vror_tumu (vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl); +vuint32m8_t __riscv_vror_tumu (vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, size_t rs1, size_t vl); +vuint64m1_t __riscv_vror_tumu (vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl); +vuint64m1_t __riscv_vror_tumu (vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, size_t rs1, size_t vl); +vuint64m2_t __riscv_vror_tumu (vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl); +vuint64m2_t __riscv_vror_tumu (vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, size_t rs1, size_t vl); +vuint64m4_t __riscv_vror_tumu (vbool16_t vm, vuint64m4_t vd, 
vuint64m4_t vs2, vuint64m4_t vs1, size_t vl); +vuint64m4_t __riscv_vror_tumu (vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, size_t rs1, size_t vl); +vuint64m8_t __riscv_vror_tumu (vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl); +vuint64m8_t __riscv_vror_tumu (vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, size_t rs1, size_t vl); +// masked functions +vuint8mf8_t __riscv_vrol_mu (vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl); +vuint8mf8_t __riscv_vrol_mu (vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, size_t rs1, size_t vl); +vuint8mf4_t __riscv_vrol_mu (vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl); +vuint8mf4_t __riscv_vrol_mu (vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, size_t rs1, size_t vl); +vuint8mf2_t __riscv_vrol_mu (vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl); +vuint8mf2_t __riscv_vrol_mu (vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, size_t rs1, size_t vl); +vuint8m1_t __riscv_vrol_mu (vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl); +vuint8m1_t __riscv_vrol_mu (vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, size_t rs1, size_t vl); +vuint8m2_t __riscv_vrol_mu (vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl); +vuint8m2_t __riscv_vrol_mu (vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, size_t rs1, size_t vl); +vuint8m4_t __riscv_vrol_mu (vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl); +vuint8m4_t __riscv_vrol_mu (vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, size_t rs1, size_t vl); +vuint8m8_t __riscv_vrol_mu (vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl); +vuint8m8_t __riscv_vrol_mu (vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, size_t rs1, size_t vl); +vuint16mf4_t __riscv_vrol_mu (vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl); +vuint16mf4_t __riscv_vrol_mu (vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, size_t rs1, size_t vl); +vuint16mf2_t __riscv_vrol_mu (vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl); +vuint16mf2_t __riscv_vrol_mu (vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, size_t rs1, size_t vl); +vuint16m1_t __riscv_vrol_mu (vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl); +vuint16m1_t __riscv_vrol_mu (vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, size_t rs1, size_t vl); +vuint16m2_t __riscv_vrol_mu (vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl); +vuint16m2_t __riscv_vrol_mu (vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, size_t rs1, size_t vl); +vuint16m4_t __riscv_vrol_mu (vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl); +vuint16m4_t __riscv_vrol_mu (vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, size_t rs1, size_t vl); +vuint16m8_t __riscv_vrol_mu (vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl); +vuint16m8_t __riscv_vrol_mu (vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, size_t rs1, size_t vl); +vuint32mf2_t __riscv_vrol_mu (vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl); +vuint32mf2_t __riscv_vrol_mu (vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, size_t rs1, size_t vl); +vuint32m1_t __riscv_vrol_mu (vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl); +vuint32m1_t __riscv_vrol_mu (vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, size_t rs1, size_t vl); +vuint32m2_t __riscv_vrol_mu (vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, vuint32m2_t 
vs1, size_t vl); +vuint32m2_t __riscv_vrol_mu (vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, size_t rs1, size_t vl); +vuint32m4_t __riscv_vrol_mu (vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl); +vuint32m4_t __riscv_vrol_mu (vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, size_t rs1, size_t vl); +vuint32m8_t __riscv_vrol_mu (vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl); +vuint32m8_t __riscv_vrol_mu (vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, size_t rs1, size_t vl); +vuint64m1_t __riscv_vrol_mu (vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl); +vuint64m1_t __riscv_vrol_mu (vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, size_t rs1, size_t vl); +vuint64m2_t __riscv_vrol_mu (vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl); +vuint64m2_t __riscv_vrol_mu (vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, size_t rs1, size_t vl); +vuint64m4_t __riscv_vrol_mu (vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl); +vuint64m4_t __riscv_vrol_mu (vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, size_t rs1, size_t vl); +vuint64m8_t __riscv_vrol_mu (vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl); +vuint64m8_t __riscv_vrol_mu (vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, size_t rs1, size_t vl); +vuint8mf8_t __riscv_vror_mu (vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl); +vuint8mf8_t __riscv_vror_mu (vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, size_t rs1, size_t vl); +vuint8mf4_t __riscv_vror_mu (vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl); +vuint8mf4_t __riscv_vror_mu (vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, size_t rs1, size_t vl); +vuint8mf2_t __riscv_vror_mu (vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl); +vuint8mf2_t __riscv_vror_mu (vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, size_t rs1, size_t vl); +vuint8m1_t __riscv_vror_mu (vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl); +vuint8m1_t __riscv_vror_mu (vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, size_t rs1, size_t vl); +vuint8m2_t __riscv_vror_mu (vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl); +vuint8m2_t __riscv_vror_mu (vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, size_t rs1, size_t vl); +vuint8m4_t __riscv_vror_mu (vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl); +vuint8m4_t __riscv_vror_mu (vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, size_t rs1, size_t vl); +vuint8m8_t __riscv_vror_mu (vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl); +vuint8m8_t __riscv_vror_mu (vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, size_t rs1, size_t vl); +vuint16mf4_t __riscv_vror_mu (vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl); +vuint16mf4_t __riscv_vror_mu (vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, size_t rs1, size_t vl); +vuint16mf2_t __riscv_vror_mu (vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl); +vuint16mf2_t __riscv_vror_mu (vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, size_t rs1, size_t vl); +vuint16m1_t __riscv_vror_mu (vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl); +vuint16m1_t __riscv_vror_mu (vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, size_t rs1, size_t vl); +vuint16m2_t __riscv_vror_mu (vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl); +vuint16m2_t __riscv_vror_mu (vbool8_t vm, 
vuint16m2_t vd, vuint16m2_t vs2, size_t rs1, size_t vl); +vuint16m4_t __riscv_vror_mu (vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl); +vuint16m4_t __riscv_vror_mu (vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, size_t rs1, size_t vl); +vuint16m8_t __riscv_vror_mu (vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl); +vuint16m8_t __riscv_vror_mu (vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, size_t rs1, size_t vl); +vuint32mf2_t __riscv_vror_mu (vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl); +vuint32mf2_t __riscv_vror_mu (vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, size_t rs1, size_t vl); +vuint32m1_t __riscv_vror_mu (vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl); +vuint32m1_t __riscv_vror_mu (vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, size_t rs1, size_t vl); +vuint32m2_t __riscv_vror_mu (vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl); +vuint32m2_t __riscv_vror_mu (vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, size_t rs1, size_t vl); +vuint32m4_t __riscv_vror_mu (vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl); +vuint32m4_t __riscv_vror_mu (vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, size_t rs1, size_t vl); +vuint32m8_t __riscv_vror_mu (vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl); +vuint32m8_t __riscv_vror_mu (vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, size_t rs1, size_t vl); +vuint64m1_t __riscv_vror_mu (vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl); +vuint64m1_t __riscv_vror_mu (vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, size_t rs1, size_t vl); +vuint64m2_t __riscv_vror_mu (vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl); +vuint64m2_t __riscv_vror_mu (vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, size_t rs1, size_t vl); +vuint64m4_t __riscv_vror_mu (vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl); +vuint64m4_t __riscv_vror_mu (vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, size_t rs1, size_t vl); +vuint64m8_t __riscv_vror_mu (vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl); +vuint64m8_t __riscv_vror_mu (vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, size_t rs1, size_t vl); +---- + +[[policy-variant-overloaded]] +==== Vector Basic Bit-manipulation used - Widening Shift + +[,c] +---- +vuint16mf4_t __riscv_vwsll_tu (vuint16mf4_t vd, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl); +vuint16mf4_t __riscv_vwsll_tu (vuint16mf4_t vd, vuint8mf8_t vs2, size_t rs1, size_t vl); +vuint16mf2_t __riscv_vwsll_tu (vuint16mf2_t vd, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl); +vuint16mf2_t __riscv_vwsll_tu (vuint16mf2_t vd, vuint8mf4_t vs2, size_t rs1, size_t vl); +vuint16m1_t __riscv_vwsll_tu (vuint16m1_t vd, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl); +vuint16m1_t __riscv_vwsll_tu (vuint16m1_t vd, vuint8mf2_t vs2, size_t rs1, size_t vl); +vuint16m2_t __riscv_vwsll_tu (vuint16m2_t vd, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl); +vuint16m2_t __riscv_vwsll_tu (vuint16m2_t vd, vuint8m1_t vs2, size_t rs1, size_t vl); +vuint16m4_t __riscv_vwsll_tu (vuint16m4_t vd, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl); +vuint16m4_t __riscv_vwsll_tu (vuint16m4_t vd, vuint8m2_t vs2, size_t rs1, size_t vl); +vuint16m8_t __riscv_vwsll_tu (vuint16m8_t vd, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl); +vuint16m8_t __riscv_vwsll_tu (vuint16m8_t vd, vuint8m4_t vs2, size_t rs1, size_t vl); +vuint32mf2_t 
__riscv_vwsll_tu (vuint32mf2_t vd, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl); +vuint32mf2_t __riscv_vwsll_tu (vuint32mf2_t vd, vuint16mf4_t vs2, size_t rs1, size_t vl); +vuint32m1_t __riscv_vwsll_tu (vuint32m1_t vd, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl); +vuint32m1_t __riscv_vwsll_tu (vuint32m1_t vd, vuint16mf2_t vs2, size_t rs1, size_t vl); +vuint32m2_t __riscv_vwsll_tu (vuint32m2_t vd, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl); +vuint32m2_t __riscv_vwsll_tu (vuint32m2_t vd, vuint16m1_t vs2, size_t rs1, size_t vl); +vuint32m4_t __riscv_vwsll_tu (vuint32m4_t vd, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl); +vuint32m4_t __riscv_vwsll_tu (vuint32m4_t vd, vuint16m2_t vs2, size_t rs1, size_t vl); +vuint32m8_t __riscv_vwsll_tu (vuint32m8_t vd, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl); +vuint32m8_t __riscv_vwsll_tu (vuint32m8_t vd, vuint16m4_t vs2, size_t rs1, size_t vl); +vuint64m1_t __riscv_vwsll_tu (vuint64m1_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl); +vuint64m1_t __riscv_vwsll_tu (vuint64m1_t vd, vuint32mf2_t vs2, size_t rs1, size_t vl); +vuint64m2_t __riscv_vwsll_tu (vuint64m2_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl); +vuint64m2_t __riscv_vwsll_tu (vuint64m2_t vd, vuint32m1_t vs2, size_t rs1, size_t vl); +vuint64m4_t __riscv_vwsll_tu (vuint64m4_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl); +vuint64m4_t __riscv_vwsll_tu (vuint64m4_t vd, vuint32m2_t vs2, size_t rs1, size_t vl); +vuint64m8_t __riscv_vwsll_tu (vuint64m8_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl); +vuint64m8_t __riscv_vwsll_tu (vuint64m8_t vd, vuint32m4_t vs2, size_t rs1, size_t vl); +// masked functions +vuint16mf4_t __riscv_vwsll_tum (vbool64_t vm, vuint16mf4_t vd, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl); +vuint16mf4_t __riscv_vwsll_tum (vbool64_t vm, vuint16mf4_t vd, vuint8mf8_t vs2, size_t rs1, size_t vl); +vuint16mf2_t __riscv_vwsll_tum (vbool32_t vm, vuint16mf2_t vd, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl); +vuint16mf2_t __riscv_vwsll_tum (vbool32_t vm, vuint16mf2_t vd, vuint8mf4_t vs2, size_t rs1, size_t vl); +vuint16m1_t __riscv_vwsll_tum (vbool16_t vm, vuint16m1_t vd, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl); +vuint16m1_t __riscv_vwsll_tum (vbool16_t vm, vuint16m1_t vd, vuint8mf2_t vs2, size_t rs1, size_t vl); +vuint16m2_t __riscv_vwsll_tum (vbool8_t vm, vuint16m2_t vd, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl); +vuint16m2_t __riscv_vwsll_tum (vbool8_t vm, vuint16m2_t vd, vuint8m1_t vs2, size_t rs1, size_t vl); +vuint16m4_t __riscv_vwsll_tum (vbool4_t vm, vuint16m4_t vd, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl); +vuint16m4_t __riscv_vwsll_tum (vbool4_t vm, vuint16m4_t vd, vuint8m2_t vs2, size_t rs1, size_t vl); +vuint16m8_t __riscv_vwsll_tum (vbool2_t vm, vuint16m8_t vd, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl); +vuint16m8_t __riscv_vwsll_tum (vbool2_t vm, vuint16m8_t vd, vuint8m4_t vs2, size_t rs1, size_t vl); +vuint32mf2_t __riscv_vwsll_tum (vbool64_t vm, vuint32mf2_t vd, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl); +vuint32mf2_t __riscv_vwsll_tum (vbool64_t vm, vuint32mf2_t vd, vuint16mf4_t vs2, size_t rs1, size_t vl); +vuint32m1_t __riscv_vwsll_tum (vbool32_t vm, vuint32m1_t vd, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl); +vuint32m1_t __riscv_vwsll_tum (vbool32_t vm, vuint32m1_t vd, vuint16mf2_t vs2, size_t rs1, size_t vl); +vuint32m2_t __riscv_vwsll_tum (vbool16_t vm, vuint32m2_t vd, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl); +vuint32m2_t __riscv_vwsll_tum (vbool16_t vm, vuint32m2_t vd, vuint16m1_t vs2, size_t rs1, size_t vl); 
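+// Illustrative usage sketch, not part of the generated listing: a widening
+// shift doubles the element width, so the destination register group has a
+// wider type than vs2.  The helper name is hypothetical; assumes
+// <riscv_vector.h> and the Zvbb extension.
+static inline vuint32m2_t widen_shl4_tum(vbool16_t vm, vuint32m2_t vd, vuint16m1_t vs2, size_t vl) {
+  // "tum" policy: tail elements keep the value already in vd and masked-off
+  // elements follow the mask-agnostic policy; active 16-bit elements of vs2
+  // are zero-extended to 32 bits and shifted left by 4.
+  return __riscv_vwsll_tum(vm, vd, vs2, (size_t)4, vl);
+}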
+vuint32m4_t __riscv_vwsll_tum (vbool8_t vm, vuint32m4_t vd, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl); +vuint32m4_t __riscv_vwsll_tum (vbool8_t vm, vuint32m4_t vd, vuint16m2_t vs2, size_t rs1, size_t vl); +vuint32m8_t __riscv_vwsll_tum (vbool4_t vm, vuint32m8_t vd, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl); +vuint32m8_t __riscv_vwsll_tum (vbool4_t vm, vuint32m8_t vd, vuint16m4_t vs2, size_t rs1, size_t vl); +vuint64m1_t __riscv_vwsll_tum (vbool64_t vm, vuint64m1_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl); +vuint64m1_t __riscv_vwsll_tum (vbool64_t vm, vuint64m1_t vd, vuint32mf2_t vs2, size_t rs1, size_t vl); +vuint64m2_t __riscv_vwsll_tum (vbool32_t vm, vuint64m2_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl); +vuint64m2_t __riscv_vwsll_tum (vbool32_t vm, vuint64m2_t vd, vuint32m1_t vs2, size_t rs1, size_t vl); +vuint64m4_t __riscv_vwsll_tum (vbool16_t vm, vuint64m4_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl); +vuint64m4_t __riscv_vwsll_tum (vbool16_t vm, vuint64m4_t vd, vuint32m2_t vs2, size_t rs1, size_t vl); +vuint64m8_t __riscv_vwsll_tum (vbool8_t vm, vuint64m8_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl); +vuint64m8_t __riscv_vwsll_tum (vbool8_t vm, vuint64m8_t vd, vuint32m4_t vs2, size_t rs1, size_t vl); +// masked functions +vuint16mf4_t __riscv_vwsll_tumu (vbool64_t vm, vuint16mf4_t vd, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl); +vuint16mf4_t __riscv_vwsll_tumu (vbool64_t vm, vuint16mf4_t vd, vuint8mf8_t vs2, size_t rs1, size_t vl); +vuint16mf2_t __riscv_vwsll_tumu (vbool32_t vm, vuint16mf2_t vd, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl); +vuint16mf2_t __riscv_vwsll_tumu (vbool32_t vm, vuint16mf2_t vd, vuint8mf4_t vs2, size_t rs1, size_t vl); +vuint16m1_t __riscv_vwsll_tumu (vbool16_t vm, vuint16m1_t vd, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl); +vuint16m1_t __riscv_vwsll_tumu (vbool16_t vm, vuint16m1_t vd, vuint8mf2_t vs2, size_t rs1, size_t vl); +vuint16m2_t __riscv_vwsll_tumu (vbool8_t vm, vuint16m2_t vd, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl); +vuint16m2_t __riscv_vwsll_tumu (vbool8_t vm, vuint16m2_t vd, vuint8m1_t vs2, size_t rs1, size_t vl); +vuint16m4_t __riscv_vwsll_tumu (vbool4_t vm, vuint16m4_t vd, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl); +vuint16m4_t __riscv_vwsll_tumu (vbool4_t vm, vuint16m4_t vd, vuint8m2_t vs2, size_t rs1, size_t vl); +vuint16m8_t __riscv_vwsll_tumu (vbool2_t vm, vuint16m8_t vd, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl); +vuint16m8_t __riscv_vwsll_tumu (vbool2_t vm, vuint16m8_t vd, vuint8m4_t vs2, size_t rs1, size_t vl); +vuint32mf2_t __riscv_vwsll_tumu (vbool64_t vm, vuint32mf2_t vd, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl); +vuint32mf2_t __riscv_vwsll_tumu (vbool64_t vm, vuint32mf2_t vd, vuint16mf4_t vs2, size_t rs1, size_t vl); +vuint32m1_t __riscv_vwsll_tumu (vbool32_t vm, vuint32m1_t vd, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl); +vuint32m1_t __riscv_vwsll_tumu (vbool32_t vm, vuint32m1_t vd, vuint16mf2_t vs2, size_t rs1, size_t vl); +vuint32m2_t __riscv_vwsll_tumu (vbool16_t vm, vuint32m2_t vd, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl); +vuint32m2_t __riscv_vwsll_tumu (vbool16_t vm, vuint32m2_t vd, vuint16m1_t vs2, size_t rs1, size_t vl); +vuint32m4_t __riscv_vwsll_tumu (vbool8_t vm, vuint32m4_t vd, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl); +vuint32m4_t __riscv_vwsll_tumu (vbool8_t vm, vuint32m4_t vd, vuint16m2_t vs2, size_t rs1, size_t vl); +vuint32m8_t __riscv_vwsll_tumu (vbool4_t vm, vuint32m8_t vd, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl); +vuint32m8_t __riscv_vwsll_tumu 
(vbool4_t vm, vuint32m8_t vd, vuint16m4_t vs2, size_t rs1, size_t vl); +vuint64m1_t __riscv_vwsll_tumu (vbool64_t vm, vuint64m1_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl); +vuint64m1_t __riscv_vwsll_tumu (vbool64_t vm, vuint64m1_t vd, vuint32mf2_t vs2, size_t rs1, size_t vl); +vuint64m2_t __riscv_vwsll_tumu (vbool32_t vm, vuint64m2_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl); +vuint64m2_t __riscv_vwsll_tumu (vbool32_t vm, vuint64m2_t vd, vuint32m1_t vs2, size_t rs1, size_t vl); +vuint64m4_t __riscv_vwsll_tumu (vbool16_t vm, vuint64m4_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl); +vuint64m4_t __riscv_vwsll_tumu (vbool16_t vm, vuint64m4_t vd, vuint32m2_t vs2, size_t rs1, size_t vl); +vuint64m8_t __riscv_vwsll_tumu (vbool8_t vm, vuint64m8_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl); +vuint64m8_t __riscv_vwsll_tumu (vbool8_t vm, vuint64m8_t vd, vuint32m4_t vs2, size_t rs1, size_t vl); +// masked functions +vuint16mf4_t __riscv_vwsll_mu (vbool64_t vm, vuint16mf4_t vd, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl); +vuint16mf4_t __riscv_vwsll_mu (vbool64_t vm, vuint16mf4_t vd, vuint8mf8_t vs2, size_t rs1, size_t vl); +vuint16mf2_t __riscv_vwsll_mu (vbool32_t vm, vuint16mf2_t vd, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl); +vuint16mf2_t __riscv_vwsll_mu (vbool32_t vm, vuint16mf2_t vd, vuint8mf4_t vs2, size_t rs1, size_t vl); +vuint16m1_t __riscv_vwsll_mu (vbool16_t vm, vuint16m1_t vd, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl); +vuint16m1_t __riscv_vwsll_mu (vbool16_t vm, vuint16m1_t vd, vuint8mf2_t vs2, size_t rs1, size_t vl); +vuint16m2_t __riscv_vwsll_mu (vbool8_t vm, vuint16m2_t vd, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl); +vuint16m2_t __riscv_vwsll_mu (vbool8_t vm, vuint16m2_t vd, vuint8m1_t vs2, size_t rs1, size_t vl); +vuint16m4_t __riscv_vwsll_mu (vbool4_t vm, vuint16m4_t vd, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl); +vuint16m4_t __riscv_vwsll_mu (vbool4_t vm, vuint16m4_t vd, vuint8m2_t vs2, size_t rs1, size_t vl); +vuint16m8_t __riscv_vwsll_mu (vbool2_t vm, vuint16m8_t vd, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl); +vuint16m8_t __riscv_vwsll_mu (vbool2_t vm, vuint16m8_t vd, vuint8m4_t vs2, size_t rs1, size_t vl); +vuint32mf2_t __riscv_vwsll_mu (vbool64_t vm, vuint32mf2_t vd, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl); +vuint32mf2_t __riscv_vwsll_mu (vbool64_t vm, vuint32mf2_t vd, vuint16mf4_t vs2, size_t rs1, size_t vl); +vuint32m1_t __riscv_vwsll_mu (vbool32_t vm, vuint32m1_t vd, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl); +vuint32m1_t __riscv_vwsll_mu (vbool32_t vm, vuint32m1_t vd, vuint16mf2_t vs2, size_t rs1, size_t vl); +vuint32m2_t __riscv_vwsll_mu (vbool16_t vm, vuint32m2_t vd, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl); +vuint32m2_t __riscv_vwsll_mu (vbool16_t vm, vuint32m2_t vd, vuint16m1_t vs2, size_t rs1, size_t vl); +vuint32m4_t __riscv_vwsll_mu (vbool8_t vm, vuint32m4_t vd, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl); +vuint32m4_t __riscv_vwsll_mu (vbool8_t vm, vuint32m4_t vd, vuint16m2_t vs2, size_t rs1, size_t vl); +vuint32m8_t __riscv_vwsll_mu (vbool4_t vm, vuint32m8_t vd, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl); +vuint32m8_t __riscv_vwsll_mu (vbool4_t vm, vuint32m8_t vd, vuint16m4_t vs2, size_t rs1, size_t vl); +vuint64m1_t __riscv_vwsll_mu (vbool64_t vm, vuint64m1_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl); +vuint64m1_t __riscv_vwsll_mu (vbool64_t vm, vuint64m1_t vd, vuint32mf2_t vs2, size_t rs1, size_t vl); +vuint64m2_t __riscv_vwsll_mu (vbool32_t vm, vuint64m2_t vd, vuint32m1_t vs2, vuint32m1_t vs1, 
size_t vl); +vuint64m2_t __riscv_vwsll_mu (vbool32_t vm, vuint64m2_t vd, vuint32m1_t vs2, size_t rs1, size_t vl); +vuint64m4_t __riscv_vwsll_mu (vbool16_t vm, vuint64m4_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl); +vuint64m4_t __riscv_vwsll_mu (vbool16_t vm, vuint64m4_t vd, vuint32m2_t vs2, size_t rs1, size_t vl); +vuint64m8_t __riscv_vwsll_mu (vbool8_t vm, vuint64m8_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl); +vuint64m8_t __riscv_vwsll_mu (vbool8_t vm, vuint64m8_t vd, vuint32m4_t vs2, size_t rs1, size_t vl); +---- + +=== Zvbc - Vector Carryless Multiplication + +[[policy-variant-overloaded]] +==== Vector Carryless Multiplication + +[,c] +---- +vuint64m1_t __riscv_vclmul_tu (vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl); +vuint64m1_t __riscv_vclmul_tu (vuint64m1_t vd, vuint64m1_t vs2, uint64_t rs1, size_t vl); +vuint64m2_t __riscv_vclmul_tu (vuint64m2_t vd, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl); +vuint64m2_t __riscv_vclmul_tu (vuint64m2_t vd, vuint64m2_t vs2, uint64_t rs1, size_t vl); +vuint64m4_t __riscv_vclmul_tu (vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl); +vuint64m4_t __riscv_vclmul_tu (vuint64m4_t vd, vuint64m4_t vs2, uint64_t rs1, size_t vl); +vuint64m8_t __riscv_vclmul_tu (vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl); +vuint64m8_t __riscv_vclmul_tu (vuint64m8_t vd, vuint64m8_t vs2, uint64_t rs1, size_t vl); +vuint64m1_t __riscv_vclmulh_tu (vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl); +vuint64m1_t __riscv_vclmulh_tu (vuint64m1_t vd, vuint64m1_t vs2, uint64_t rs1, size_t vl); +vuint64m2_t __riscv_vclmulh_tu (vuint64m2_t vd, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl); +vuint64m2_t __riscv_vclmulh_tu (vuint64m2_t vd, vuint64m2_t vs2, uint64_t rs1, size_t vl); +vuint64m4_t __riscv_vclmulh_tu (vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl); +vuint64m4_t __riscv_vclmulh_tu (vuint64m4_t vd, vuint64m4_t vs2, uint64_t rs1, size_t vl); +vuint64m8_t __riscv_vclmulh_tu (vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl); +vuint64m8_t __riscv_vclmulh_tu (vuint64m8_t vd, vuint64m8_t vs2, uint64_t rs1, size_t vl); +// masked functions +vuint64m1_t __riscv_vclmul_tum (vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl); +vuint64m1_t __riscv_vclmul_tum (vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, uint64_t rs1, size_t vl); +vuint64m2_t __riscv_vclmul_tum (vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl); +vuint64m2_t __riscv_vclmul_tum (vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, uint64_t rs1, size_t vl); +vuint64m4_t __riscv_vclmul_tum (vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl); +vuint64m4_t __riscv_vclmul_tum (vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, uint64_t rs1, size_t vl); +vuint64m8_t __riscv_vclmul_tum (vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl); +vuint64m8_t __riscv_vclmul_tum (vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, uint64_t rs1, size_t vl); +vuint64m1_t __riscv_vclmulh_tum (vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl); +vuint64m1_t __riscv_vclmulh_tum (vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, uint64_t rs1, size_t vl); +vuint64m2_t __riscv_vclmulh_tum (vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl); +vuint64m2_t __riscv_vclmulh_tum (vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, uint64_t rs1, size_t vl); +vuint64m4_t __riscv_vclmulh_tum (vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, 
vuint64m4_t vs1, size_t vl); +vuint64m4_t __riscv_vclmulh_tum (vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, uint64_t rs1, size_t vl); +vuint64m8_t __riscv_vclmulh_tum (vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl); +vuint64m8_t __riscv_vclmulh_tum (vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, uint64_t rs1, size_t vl); +// masked functions +vuint64m1_t __riscv_vclmul_tumu (vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl); +vuint64m1_t __riscv_vclmul_tumu (vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, uint64_t rs1, size_t vl); +vuint64m2_t __riscv_vclmul_tumu (vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl); +vuint64m2_t __riscv_vclmul_tumu (vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, uint64_t rs1, size_t vl); +vuint64m4_t __riscv_vclmul_tumu (vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl); +vuint64m4_t __riscv_vclmul_tumu (vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, uint64_t rs1, size_t vl); +vuint64m8_t __riscv_vclmul_tumu (vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl); +vuint64m8_t __riscv_vclmul_tumu (vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, uint64_t rs1, size_t vl); +vuint64m1_t __riscv_vclmulh_tumu (vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl); +vuint64m1_t __riscv_vclmulh_tumu (vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, uint64_t rs1, size_t vl); +vuint64m2_t __riscv_vclmulh_tumu (vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl); +vuint64m2_t __riscv_vclmulh_tumu (vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, uint64_t rs1, size_t vl); +vuint64m4_t __riscv_vclmulh_tumu (vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl); +vuint64m4_t __riscv_vclmulh_tumu (vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, uint64_t rs1, size_t vl); +vuint64m8_t __riscv_vclmulh_tumu (vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl); +vuint64m8_t __riscv_vclmulh_tumu (vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, uint64_t rs1, size_t vl); +// masked functions +vuint64m1_t __riscv_vclmul_mu (vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl); +vuint64m1_t __riscv_vclmul_mu (vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, uint64_t rs1, size_t vl); +vuint64m2_t __riscv_vclmul_mu (vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl); +vuint64m2_t __riscv_vclmul_mu (vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, uint64_t rs1, size_t vl); +vuint64m4_t __riscv_vclmul_mu (vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl); +vuint64m4_t __riscv_vclmul_mu (vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, uint64_t rs1, size_t vl); +vuint64m8_t __riscv_vclmul_mu (vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl); +vuint64m8_t __riscv_vclmul_mu (vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, uint64_t rs1, size_t vl); +vuint64m1_t __riscv_vclmulh_mu (vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl); +vuint64m1_t __riscv_vclmulh_mu (vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, uint64_t rs1, size_t vl); +vuint64m2_t __riscv_vclmulh_mu (vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl); +vuint64m2_t __riscv_vclmulh_mu (vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, uint64_t rs1, size_t vl); +vuint64m4_t __riscv_vclmulh_mu (vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl); +vuint64m4_t __riscv_vclmulh_mu 
(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, uint64_t rs1, size_t vl); +vuint64m8_t __riscv_vclmulh_mu (vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl); +vuint64m8_t __riscv_vclmulh_mu (vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, uint64_t rs1, size_t vl); +---- + +=== Zvkg - Vector GCM/GMAC + +[[policy-variant-overloaded]] +==== Vector GCM/GMAC + +[,c] +---- +vuint32mf2_t __riscv_vghsh_tu (vuint32mf2_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl); +vuint32m1_t __riscv_vghsh_tu (vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl); +vuint32m2_t __riscv_vghsh_tu (vuint32m2_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl); +vuint32m4_t __riscv_vghsh_tu (vuint32m4_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl); +vuint32m8_t __riscv_vghsh_tu (vuint32m8_t vd, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl); +vuint32mf2_t __riscv_vgmul_tu (vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vgmul_tu (vuint32m1_t vd, vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vgmul_tu (vuint32m2_t vd, vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vgmul_tu (vuint32m4_t vd, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vgmul_tu (vuint32m8_t vd, vuint32m8_t vs2, size_t vl); +---- + +=== Zvkned - NIST Suite: Vector AES Block Cipher + +[[policy-variant-overloaded]] +==== Vector AES Encryption + +[,c] +---- +vuint32mf2_t __riscv_vaesef_vv_tu (vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); +vuint32mf2_t __riscv_vaesef_vs_tu (vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vaesef_vs_tu (vuint32m1_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m2_t __riscv_vaesef_vs_tu (vuint32m2_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m4_t __riscv_vaesef_vs_tu (vuint32m4_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m8_t __riscv_vaesef_vs_tu (vuint32m8_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vaesef_vv_tu (vuint32m1_t vd, vuint32m1_t vs2, size_t vl); +vuint32m1_t __riscv_vaesef_vs_tu (vuint32m1_t vd, vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vaesef_vs_tu (vuint32m2_t vd, vuint32m1_t vs2, size_t vl); +vuint32m4_t __riscv_vaesef_vs_tu (vuint32m4_t vd, vuint32m1_t vs2, size_t vl); +vuint32m8_t __riscv_vaesef_vs_tu (vuint32m8_t vd, vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vaesef_vv_tu (vuint32m2_t vd, vuint32m2_t vs2, size_t vl); +vuint32m2_t __riscv_vaesef_vs_tu (vuint32m2_t vd, vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vaesef_vs_tu (vuint32m4_t vd, vuint32m2_t vs2, size_t vl); +vuint32m8_t __riscv_vaesef_vs_tu (vuint32m8_t vd, vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vaesef_vv_tu (vuint32m4_t vd, vuint32m4_t vs2, size_t vl); +vuint32m4_t __riscv_vaesef_vs_tu (vuint32m4_t vd, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vaesef_vs_tu (vuint32m8_t vd, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vaesef_vv_tu (vuint32m8_t vd, vuint32m8_t vs2, size_t vl); +vuint32mf2_t __riscv_vaesem_vv_tu (vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); +vuint32mf2_t __riscv_vaesem_vs_tu (vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vaesem_vs_tu (vuint32m1_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m2_t __riscv_vaesem_vs_tu (vuint32m2_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m4_t __riscv_vaesem_vs_tu (vuint32m4_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m8_t __riscv_vaesem_vs_tu (vuint32m8_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vaesem_vv_tu (vuint32m1_t vd, vuint32m1_t vs2, size_t vl); +vuint32m1_t __riscv_vaesem_vs_tu (vuint32m1_t vd, vuint32m1_t vs2, size_t 
vl); +vuint32m2_t __riscv_vaesem_vs_tu (vuint32m2_t vd, vuint32m1_t vs2, size_t vl); +vuint32m4_t __riscv_vaesem_vs_tu (vuint32m4_t vd, vuint32m1_t vs2, size_t vl); +vuint32m8_t __riscv_vaesem_vs_tu (vuint32m8_t vd, vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vaesem_vv_tu (vuint32m2_t vd, vuint32m2_t vs2, size_t vl); +vuint32m2_t __riscv_vaesem_vs_tu (vuint32m2_t vd, vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vaesem_vs_tu (vuint32m4_t vd, vuint32m2_t vs2, size_t vl); +vuint32m8_t __riscv_vaesem_vs_tu (vuint32m8_t vd, vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vaesem_vv_tu (vuint32m4_t vd, vuint32m4_t vs2, size_t vl); +vuint32m4_t __riscv_vaesem_vs_tu (vuint32m4_t vd, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vaesem_vs_tu (vuint32m8_t vd, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vaesem_vv_tu (vuint32m8_t vd, vuint32m8_t vs2, size_t vl); +---- + +[[policy-variant-overloaded]] +==== Vector AES Decryption + +[,c] +---- +vuint32mf2_t __riscv_vaesdf_vv_tu (vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); +vuint32mf2_t __riscv_vaesdf_vs_tu (vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vaesdf_vs_tu (vuint32m1_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m2_t __riscv_vaesdf_vs_tu (vuint32m2_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m4_t __riscv_vaesdf_vs_tu (vuint32m4_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m8_t __riscv_vaesdf_vs_tu (vuint32m8_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vaesdf_vv_tu (vuint32m1_t vd, vuint32m1_t vs2, size_t vl); +vuint32m1_t __riscv_vaesdf_vs_tu (vuint32m1_t vd, vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vaesdf_vs_tu (vuint32m2_t vd, vuint32m1_t vs2, size_t vl); +vuint32m4_t __riscv_vaesdf_vs_tu (vuint32m4_t vd, vuint32m1_t vs2, size_t vl); +vuint32m8_t __riscv_vaesdf_vs_tu (vuint32m8_t vd, vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vaesdf_vv_tu (vuint32m2_t vd, vuint32m2_t vs2, size_t vl); +vuint32m2_t __riscv_vaesdf_vs_tu (vuint32m2_t vd, vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vaesdf_vs_tu (vuint32m4_t vd, vuint32m2_t vs2, size_t vl); +vuint32m8_t __riscv_vaesdf_vs_tu (vuint32m8_t vd, vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vaesdf_vv_tu (vuint32m4_t vd, vuint32m4_t vs2, size_t vl); +vuint32m4_t __riscv_vaesdf_vs_tu (vuint32m4_t vd, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vaesdf_vs_tu (vuint32m8_t vd, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vaesdf_vv_tu (vuint32m8_t vd, vuint32m8_t vs2, size_t vl); +vuint32mf2_t __riscv_vaesdm_vv_tu (vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); +vuint32mf2_t __riscv_vaesdm_vs_tu (vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vaesdm_vs_tu (vuint32m1_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m2_t __riscv_vaesdm_vs_tu (vuint32m2_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m4_t __riscv_vaesdm_vs_tu (vuint32m4_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m8_t __riscv_vaesdm_vs_tu (vuint32m8_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vaesdm_vv_tu (vuint32m1_t vd, vuint32m1_t vs2, size_t vl); +vuint32m1_t __riscv_vaesdm_vs_tu (vuint32m1_t vd, vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vaesdm_vs_tu (vuint32m2_t vd, vuint32m1_t vs2, size_t vl); +vuint32m4_t __riscv_vaesdm_vs_tu (vuint32m4_t vd, vuint32m1_t vs2, size_t vl); +vuint32m8_t __riscv_vaesdm_vs_tu (vuint32m8_t vd, vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vaesdm_vv_tu (vuint32m2_t vd, vuint32m2_t vs2, size_t vl); +vuint32m2_t __riscv_vaesdm_vs_tu (vuint32m2_t vd, vuint32m2_t vs2, size_t vl); 
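+// Illustrative usage sketch, not part of the generated listing: one AES
+// decryption middle round in tail-undisturbed form.  The ".vv" form takes a
+// round key per element group from vs2, while the ".vs" forms reuse the single
+// element group held in vs2 for every element group of vd.  The helper name is
+// hypothetical; assumes <riscv_vector.h> and the Zvkned extension.
+static inline vuint32m1_t aes_dec_middle_round_tu(vuint32m1_t state, vuint32m1_t round_key, size_t vl) {
+  // Applies one inverse-cipher middle round to state (vd) using round_key
+  // (vs2); tail elements keep the value already present in the destination.
+  return __riscv_vaesdm_vv_tu(state, round_key, vl);
+}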
+vuint32m4_t __riscv_vaesdm_vs_tu (vuint32m4_t vd, vuint32m2_t vs2, size_t vl); +vuint32m8_t __riscv_vaesdm_vs_tu (vuint32m8_t vd, vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vaesdm_vv_tu (vuint32m4_t vd, vuint32m4_t vs2, size_t vl); +vuint32m4_t __riscv_vaesdm_vs_tu (vuint32m4_t vd, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vaesdm_vs_tu (vuint32m8_t vd, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vaesdm_vv_tu (vuint32m8_t vd, vuint32m8_t vs2, size_t vl); +---- + +[[policy-variant-overloaded]] +==== Vector AES-128 Forward KeySchedule generation + +[,c] +---- +vuint32mf2_t __riscv_vaeskf1_tu (vuint32mf2_t vd, vuint32mf2_t vs2, size_t uimm, size_t vl); +vuint32m1_t __riscv_vaeskf1_tu (vuint32m1_t vd, vuint32m1_t vs2, size_t uimm, size_t vl); +vuint32m2_t __riscv_vaeskf1_tu (vuint32m2_t vd, vuint32m2_t vs2, size_t uimm, size_t vl); +vuint32m4_t __riscv_vaeskf1_tu (vuint32m4_t vd, vuint32m4_t vs2, size_t uimm, size_t vl); +vuint32m8_t __riscv_vaeskf1_tu (vuint32m8_t vd, vuint32m8_t vs2, size_t uimm, size_t vl); +vuint32mf2_t __riscv_vaeskf2_tu (vuint32mf2_t vd, vuint32mf2_t vs2, size_t uimm, size_t vl); +vuint32m1_t __riscv_vaeskf2_tu (vuint32m1_t vd, vuint32m1_t vs2, size_t uimm, size_t vl); +vuint32m2_t __riscv_vaeskf2_tu (vuint32m2_t vd, vuint32m2_t vs2, size_t uimm, size_t vl); +vuint32m4_t __riscv_vaeskf2_tu (vuint32m4_t vd, vuint32m4_t vs2, size_t uimm, size_t vl); +vuint32m8_t __riscv_vaeskf2_tu (vuint32m8_t vd, vuint32m8_t vs2, size_t uimm, size_t vl); +---- + +[[policy-variant-overloaded]] +==== Vector AES round zero + +[,c] +---- +vuint32mf2_t __riscv_vaesz_tu (vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vaesz_tu (vuint32m1_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m2_t __riscv_vaesz_tu (vuint32m2_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m4_t __riscv_vaesz_tu (vuint32m4_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m8_t __riscv_vaesz_tu (vuint32m8_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vaesz_tu (vuint32m1_t vd, vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vaesz_tu (vuint32m2_t vd, vuint32m1_t vs2, size_t vl); +vuint32m4_t __riscv_vaesz_tu (vuint32m4_t vd, vuint32m1_t vs2, size_t vl); +vuint32m8_t __riscv_vaesz_tu (vuint32m8_t vd, vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vaesz_tu (vuint32m2_t vd, vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vaesz_tu (vuint32m4_t vd, vuint32m2_t vs2, size_t vl); +vuint32m8_t __riscv_vaesz_tu (vuint32m8_t vd, vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vaesz_tu (vuint32m4_t vd, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vaesz_tu (vuint32m8_t vd, vuint32m4_t vs2, size_t vl); +---- + +=== Zvknh - NIST Suite: Vector SHA-2 Secure Hash + +[[policy-variant-overloaded]] +==== Vector SHA-2 message schedule + +[,c] +---- +vuint32mf2_t __riscv_vsha2ms_tu (vuint32mf2_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl); +vuint32m1_t __riscv_vsha2ms_tu (vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl); +vuint32m2_t __riscv_vsha2ms_tu (vuint32m2_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl); +vuint32m4_t __riscv_vsha2ms_tu (vuint32m4_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl); +vuint32m8_t __riscv_vsha2ms_tu (vuint32m8_t vd, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl); +vuint64m1_t __riscv_vsha2ms_tu (vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl); +vuint64m2_t __riscv_vsha2ms_tu (vuint64m2_t vd, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl); +vuint64m4_t __riscv_vsha2ms_tu (vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t vs1, size_t 
vl); +vuint64m8_t __riscv_vsha2ms_tu (vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl); +---- + +[[policy-variant-overloaded]] +==== Vector SHA-2 two rounds of compression + +[,c] +---- +vuint32mf2_t __riscv_vsha2ch_tu (vuint32mf2_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl); +vuint32m1_t __riscv_vsha2ch_tu (vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl); +vuint32m2_t __riscv_vsha2ch_tu (vuint32m2_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl); +vuint32m4_t __riscv_vsha2ch_tu (vuint32m4_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl); +vuint32m8_t __riscv_vsha2ch_tu (vuint32m8_t vd, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl); +vuint64m1_t __riscv_vsha2ch_tu (vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl); +vuint64m2_t __riscv_vsha2ch_tu (vuint64m2_t vd, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl); +vuint64m4_t __riscv_vsha2ch_tu (vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl); +vuint64m8_t __riscv_vsha2ch_tu (vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl); +vuint32mf2_t __riscv_vsha2cl_tu (vuint32mf2_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl); +vuint32m1_t __riscv_vsha2cl_tu (vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl); +vuint32m2_t __riscv_vsha2cl_tu (vuint32m2_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl); +vuint32m4_t __riscv_vsha2cl_tu (vuint32m4_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl); +vuint32m8_t __riscv_vsha2cl_tu (vuint32m8_t vd, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl); +vuint64m1_t __riscv_vsha2cl_tu (vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl); +vuint64m2_t __riscv_vsha2cl_tu (vuint64m2_t vd, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl); +vuint64m4_t __riscv_vsha2cl_tu (vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl); +vuint64m8_t __riscv_vsha2cl_tu (vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl); +---- + +=== Zvksed - ShangMi Suite: SM4 Block Cipher + +[[policy-variant-overloaded]] +==== Vector SM4 KeyExpansion + +[,c] +---- +vuint32mf2_t __riscv_vsm4k_tu (vuint32mf2_t vd, vuint32mf2_t vs2, size_t uimm, size_t vl); +vuint32m1_t __riscv_vsm4k_tu (vuint32m1_t vd, vuint32m1_t vs2, size_t uimm, size_t vl); +vuint32m2_t __riscv_vsm4k_tu (vuint32m2_t vd, vuint32m2_t vs2, size_t uimm, size_t vl); +vuint32m4_t __riscv_vsm4k_tu (vuint32m4_t vd, vuint32m4_t vs2, size_t uimm, size_t vl); +vuint32m8_t __riscv_vsm4k_tu (vuint32m8_t vd, vuint32m8_t vs2, size_t uimm, size_t vl); +---- + +[[policy-variant-overloaded]] +==== Vector SM4 Rounds + +[,c] +---- +vuint32mf2_t __riscv_vsm4r_vv_tu (vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); +vuint32mf2_t __riscv_vsm4r_vs_tu (vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vsm4r_vs_tu (vuint32m1_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m2_t __riscv_vsm4r_vs_tu (vuint32m2_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m4_t __riscv_vsm4r_vs_tu (vuint32m4_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m8_t __riscv_vsm4r_vs_tu (vuint32m8_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vsm4r_vv_tu (vuint32m1_t vd, vuint32m1_t vs2, size_t vl); +vuint32m1_t __riscv_vsm4r_vs_tu (vuint32m1_t vd, vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vsm4r_vs_tu (vuint32m2_t vd, vuint32m1_t vs2, size_t vl); +vuint32m4_t __riscv_vsm4r_vs_tu (vuint32m4_t vd, vuint32m1_t vs2, size_t vl); +vuint32m8_t __riscv_vsm4r_vs_tu (vuint32m8_t vd, vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vsm4r_vv_tu (vuint32m2_t vd, vuint32m2_t vs2, size_t vl); +vuint32m2_t 
__riscv_vsm4r_vs_tu (vuint32m2_t vd, vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vsm4r_vs_tu (vuint32m4_t vd, vuint32m2_t vs2, size_t vl); +vuint32m8_t __riscv_vsm4r_vs_tu (vuint32m8_t vd, vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vsm4r_vv_tu (vuint32m4_t vd, vuint32m4_t vs2, size_t vl); +vuint32m4_t __riscv_vsm4r_vs_tu (vuint32m4_t vd, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vsm4r_vs_tu (vuint32m8_t vd, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vsm4r_vv_tu (vuint32m8_t vd, vuint32m8_t vs2, size_t vl); +---- + +=== Zvksh - ShangMi Suite: SM3 Secure Hash + +[[policy-variant-overloaded]] +==== Vector SM3 Message Expansion + +[,c] +---- +vuint32mf2_t __riscv_vsm3me_tu (vuint32mf2_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl); +vuint32m1_t __riscv_vsm3me_tu (vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl); +vuint32m2_t __riscv_vsm3me_tu (vuint32m2_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl); +vuint32m4_t __riscv_vsm3me_tu (vuint32m4_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl); +vuint32m8_t __riscv_vsm3me_tu (vuint32m8_t vd, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl); +---- + +[[policy-variant-overloaded]] +==== Vector SM3 Compression + +[,c] +---- +vuint32mf2_t __riscv_vsm3c_tu (vuint32mf2_t vd, vuint32mf2_t vs2, size_t uimm, size_t vl); +vuint32m1_t __riscv_vsm3c_tu (vuint32m1_t vd, vuint32m1_t vs2, size_t uimm, size_t vl); +vuint32m2_t __riscv_vsm3c_tu (vuint32m2_t vd, vuint32m2_t vs2, size_t uimm, size_t vl); +vuint32m4_t __riscv_vsm3c_tu (vuint32m4_t vd, vuint32m4_t vs2, size_t uimm, size_t vl); +vuint32m8_t __riscv_vsm3c_tu (vuint32m8_t vd, vuint32m8_t vs2, size_t uimm, size_t vl); +---- diff --git a/auto-generated/vector-crypto/policy_funcs/overloaded_intrinsic_funcs/00_zvbb_-_vector_bit-manipulation_used_in_cryptography.adoc b/auto-generated/vector-crypto/policy_funcs/overloaded_intrinsic_funcs/00_zvbb_-_vector_bit-manipulation_used_in_cryptography.adoc new file mode 100644 index 000000000..a4d961b88 --- /dev/null +++ b/auto-generated/vector-crypto/policy_funcs/overloaded_intrinsic_funcs/00_zvbb_-_vector_bit-manipulation_used_in_cryptography.adoc @@ -0,0 +1,1238 @@ + +=== Zvbb - Vector Bit-manipulation used in Cryptography + +[[policy-variant-overloaded]] +==== Vector Bit-manipulation used in Cryptography - Bitwise And-Not + +[,c] +---- +vuint8mf8_t __riscv_vandn_tu (vuint8mf8_t vd, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl); +vuint8mf8_t __riscv_vandn_tu (vuint8mf8_t vd, vuint8mf8_t vs2, uint8_t rs1, size_t vl); +vuint8mf4_t __riscv_vandn_tu (vuint8mf4_t vd, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl); +vuint8mf4_t __riscv_vandn_tu (vuint8mf4_t vd, vuint8mf4_t vs2, uint8_t rs1, size_t vl); +vuint8mf2_t __riscv_vandn_tu (vuint8mf2_t vd, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl); +vuint8mf2_t __riscv_vandn_tu (vuint8mf2_t vd, vuint8mf2_t vs2, uint8_t rs1, size_t vl); +vuint8m1_t __riscv_vandn_tu (vuint8m1_t vd, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl); +vuint8m1_t __riscv_vandn_tu (vuint8m1_t vd, vuint8m1_t vs2, uint8_t rs1, size_t vl); +vuint8m2_t __riscv_vandn_tu (vuint8m2_t vd, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl); +vuint8m2_t __riscv_vandn_tu (vuint8m2_t vd, vuint8m2_t vs2, uint8_t rs1, size_t vl); +vuint8m4_t __riscv_vandn_tu (vuint8m4_t vd, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl); +vuint8m4_t __riscv_vandn_tu (vuint8m4_t vd, vuint8m4_t vs2, uint8_t rs1, size_t vl); +vuint8m8_t __riscv_vandn_tu (vuint8m8_t vd, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl); +vuint8m8_t __riscv_vandn_tu (vuint8m8_t vd, 
vuint8m8_t vs2, uint8_t rs1, size_t vl); +vuint16mf4_t __riscv_vandn_tu (vuint16mf4_t vd, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl); +vuint16mf4_t __riscv_vandn_tu (vuint16mf4_t vd, vuint16mf4_t vs2, uint16_t rs1, size_t vl); +vuint16mf2_t __riscv_vandn_tu (vuint16mf2_t vd, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl); +vuint16mf2_t __riscv_vandn_tu (vuint16mf2_t vd, vuint16mf2_t vs2, uint16_t rs1, size_t vl); +vuint16m1_t __riscv_vandn_tu (vuint16m1_t vd, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl); +vuint16m1_t __riscv_vandn_tu (vuint16m1_t vd, vuint16m1_t vs2, uint16_t rs1, size_t vl); +vuint16m2_t __riscv_vandn_tu (vuint16m2_t vd, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl); +vuint16m2_t __riscv_vandn_tu (vuint16m2_t vd, vuint16m2_t vs2, uint16_t rs1, size_t vl); +vuint16m4_t __riscv_vandn_tu (vuint16m4_t vd, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl); +vuint16m4_t __riscv_vandn_tu (vuint16m4_t vd, vuint16m4_t vs2, uint16_t rs1, size_t vl); +vuint16m8_t __riscv_vandn_tu (vuint16m8_t vd, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl); +vuint16m8_t __riscv_vandn_tu (vuint16m8_t vd, vuint16m8_t vs2, uint16_t rs1, size_t vl); +vuint32mf2_t __riscv_vandn_tu (vuint32mf2_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl); +vuint32mf2_t __riscv_vandn_tu (vuint32mf2_t vd, vuint32mf2_t vs2, uint32_t rs1, size_t vl); +vuint32m1_t __riscv_vandn_tu (vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl); +vuint32m1_t __riscv_vandn_tu (vuint32m1_t vd, vuint32m1_t vs2, uint32_t rs1, size_t vl); +vuint32m2_t __riscv_vandn_tu (vuint32m2_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl); +vuint32m2_t __riscv_vandn_tu (vuint32m2_t vd, vuint32m2_t vs2, uint32_t rs1, size_t vl); +vuint32m4_t __riscv_vandn_tu (vuint32m4_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl); +vuint32m4_t __riscv_vandn_tu (vuint32m4_t vd, vuint32m4_t vs2, uint32_t rs1, size_t vl); +vuint32m8_t __riscv_vandn_tu (vuint32m8_t vd, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl); +vuint32m8_t __riscv_vandn_tu (vuint32m8_t vd, vuint32m8_t vs2, uint32_t rs1, size_t vl); +vuint64m1_t __riscv_vandn_tu (vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl); +vuint64m1_t __riscv_vandn_tu (vuint64m1_t vd, vuint64m1_t vs2, uint64_t rs1, size_t vl); +vuint64m2_t __riscv_vandn_tu (vuint64m2_t vd, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl); +vuint64m2_t __riscv_vandn_tu (vuint64m2_t vd, vuint64m2_t vs2, uint64_t rs1, size_t vl); +vuint64m4_t __riscv_vandn_tu (vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl); +vuint64m4_t __riscv_vandn_tu (vuint64m4_t vd, vuint64m4_t vs2, uint64_t rs1, size_t vl); +vuint64m8_t __riscv_vandn_tu (vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl); +vuint64m8_t __riscv_vandn_tu (vuint64m8_t vd, vuint64m8_t vs2, uint64_t rs1, size_t vl); +// masked functions +vuint8mf8_t __riscv_vandn_tum (vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl); +vuint8mf8_t __riscv_vandn_tum (vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, uint8_t rs1, size_t vl); +vuint8mf4_t __riscv_vandn_tum (vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl); +vuint8mf4_t __riscv_vandn_tum (vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, uint8_t rs1, size_t vl); +vuint8mf2_t __riscv_vandn_tum (vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl); +vuint8mf2_t __riscv_vandn_tum (vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, uint8_t rs1, size_t vl); +vuint8m1_t __riscv_vandn_tum (vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, vuint8m1_t vs1, 
size_t vl); +vuint8m1_t __riscv_vandn_tum (vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, uint8_t rs1, size_t vl); +vuint8m2_t __riscv_vandn_tum (vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl); +vuint8m2_t __riscv_vandn_tum (vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, uint8_t rs1, size_t vl); +vuint8m4_t __riscv_vandn_tum (vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl); +vuint8m4_t __riscv_vandn_tum (vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, uint8_t rs1, size_t vl); +vuint8m8_t __riscv_vandn_tum (vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl); +vuint8m8_t __riscv_vandn_tum (vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, uint8_t rs1, size_t vl); +vuint16mf4_t __riscv_vandn_tum (vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl); +vuint16mf4_t __riscv_vandn_tum (vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, uint16_t rs1, size_t vl); +vuint16mf2_t __riscv_vandn_tum (vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl); +vuint16mf2_t __riscv_vandn_tum (vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, uint16_t rs1, size_t vl); +vuint16m1_t __riscv_vandn_tum (vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl); +vuint16m1_t __riscv_vandn_tum (vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, uint16_t rs1, size_t vl); +vuint16m2_t __riscv_vandn_tum (vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl); +vuint16m2_t __riscv_vandn_tum (vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, uint16_t rs1, size_t vl); +vuint16m4_t __riscv_vandn_tum (vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl); +vuint16m4_t __riscv_vandn_tum (vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, uint16_t rs1, size_t vl); +vuint16m8_t __riscv_vandn_tum (vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl); +vuint16m8_t __riscv_vandn_tum (vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, uint16_t rs1, size_t vl); +vuint32mf2_t __riscv_vandn_tum (vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl); +vuint32mf2_t __riscv_vandn_tum (vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, uint32_t rs1, size_t vl); +vuint32m1_t __riscv_vandn_tum (vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl); +vuint32m1_t __riscv_vandn_tum (vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, uint32_t rs1, size_t vl); +vuint32m2_t __riscv_vandn_tum (vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl); +vuint32m2_t __riscv_vandn_tum (vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, uint32_t rs1, size_t vl); +vuint32m4_t __riscv_vandn_tum (vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl); +vuint32m4_t __riscv_vandn_tum (vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, uint32_t rs1, size_t vl); +vuint32m8_t __riscv_vandn_tum (vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl); +vuint32m8_t __riscv_vandn_tum (vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, uint32_t rs1, size_t vl); +vuint64m1_t __riscv_vandn_tum (vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl); +vuint64m1_t __riscv_vandn_tum (vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, uint64_t rs1, size_t vl); +vuint64m2_t __riscv_vandn_tum (vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl); +vuint64m2_t __riscv_vandn_tum (vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, uint64_t rs1, size_t vl); +vuint64m4_t __riscv_vandn_tum (vbool16_t vm, 
vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl); +vuint64m4_t __riscv_vandn_tum (vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, uint64_t rs1, size_t vl); +vuint64m8_t __riscv_vandn_tum (vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl); +vuint64m8_t __riscv_vandn_tum (vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, uint64_t rs1, size_t vl); +// masked functions +vuint8mf8_t __riscv_vandn_tumu (vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl); +vuint8mf8_t __riscv_vandn_tumu (vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, uint8_t rs1, size_t vl); +vuint8mf4_t __riscv_vandn_tumu (vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl); +vuint8mf4_t __riscv_vandn_tumu (vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, uint8_t rs1, size_t vl); +vuint8mf2_t __riscv_vandn_tumu (vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl); +vuint8mf2_t __riscv_vandn_tumu (vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, uint8_t rs1, size_t vl); +vuint8m1_t __riscv_vandn_tumu (vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl); +vuint8m1_t __riscv_vandn_tumu (vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, uint8_t rs1, size_t vl); +vuint8m2_t __riscv_vandn_tumu (vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl); +vuint8m2_t __riscv_vandn_tumu (vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, uint8_t rs1, size_t vl); +vuint8m4_t __riscv_vandn_tumu (vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl); +vuint8m4_t __riscv_vandn_tumu (vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, uint8_t rs1, size_t vl); +vuint8m8_t __riscv_vandn_tumu (vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl); +vuint8m8_t __riscv_vandn_tumu (vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, uint8_t rs1, size_t vl); +vuint16mf4_t __riscv_vandn_tumu (vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl); +vuint16mf4_t __riscv_vandn_tumu (vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, uint16_t rs1, size_t vl); +vuint16mf2_t __riscv_vandn_tumu (vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl); +vuint16mf2_t __riscv_vandn_tumu (vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, uint16_t rs1, size_t vl); +vuint16m1_t __riscv_vandn_tumu (vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl); +vuint16m1_t __riscv_vandn_tumu (vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, uint16_t rs1, size_t vl); +vuint16m2_t __riscv_vandn_tumu (vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl); +vuint16m2_t __riscv_vandn_tumu (vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, uint16_t rs1, size_t vl); +vuint16m4_t __riscv_vandn_tumu (vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl); +vuint16m4_t __riscv_vandn_tumu (vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, uint16_t rs1, size_t vl); +vuint16m8_t __riscv_vandn_tumu (vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl); +vuint16m8_t __riscv_vandn_tumu (vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, uint16_t rs1, size_t vl); +vuint32mf2_t __riscv_vandn_tumu (vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl); +vuint32mf2_t __riscv_vandn_tumu (vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, uint32_t rs1, size_t vl); +vuint32m1_t __riscv_vandn_tumu (vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl); +vuint32m1_t __riscv_vandn_tumu (vbool32_t vm, vuint32m1_t vd, 
vuint32m1_t vs2, uint32_t rs1, size_t vl); +vuint32m2_t __riscv_vandn_tumu (vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl); +vuint32m2_t __riscv_vandn_tumu (vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, uint32_t rs1, size_t vl); +vuint32m4_t __riscv_vandn_tumu (vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl); +vuint32m4_t __riscv_vandn_tumu (vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, uint32_t rs1, size_t vl); +vuint32m8_t __riscv_vandn_tumu (vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl); +vuint32m8_t __riscv_vandn_tumu (vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, uint32_t rs1, size_t vl); +vuint64m1_t __riscv_vandn_tumu (vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl); +vuint64m1_t __riscv_vandn_tumu (vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, uint64_t rs1, size_t vl); +vuint64m2_t __riscv_vandn_tumu (vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl); +vuint64m2_t __riscv_vandn_tumu (vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, uint64_t rs1, size_t vl); +vuint64m4_t __riscv_vandn_tumu (vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl); +vuint64m4_t __riscv_vandn_tumu (vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, uint64_t rs1, size_t vl); +vuint64m8_t __riscv_vandn_tumu (vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl); +vuint64m8_t __riscv_vandn_tumu (vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, uint64_t rs1, size_t vl); +// masked functions +vuint8mf8_t __riscv_vandn_mu (vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl); +vuint8mf8_t __riscv_vandn_mu (vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, uint8_t rs1, size_t vl); +vuint8mf4_t __riscv_vandn_mu (vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl); +vuint8mf4_t __riscv_vandn_mu (vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, uint8_t rs1, size_t vl); +vuint8mf2_t __riscv_vandn_mu (vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl); +vuint8mf2_t __riscv_vandn_mu (vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, uint8_t rs1, size_t vl); +vuint8m1_t __riscv_vandn_mu (vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl); +vuint8m1_t __riscv_vandn_mu (vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, uint8_t rs1, size_t vl); +vuint8m2_t __riscv_vandn_mu (vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl); +vuint8m2_t __riscv_vandn_mu (vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, uint8_t rs1, size_t vl); +vuint8m4_t __riscv_vandn_mu (vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl); +vuint8m4_t __riscv_vandn_mu (vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, uint8_t rs1, size_t vl); +vuint8m8_t __riscv_vandn_mu (vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl); +vuint8m8_t __riscv_vandn_mu (vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, uint8_t rs1, size_t vl); +vuint16mf4_t __riscv_vandn_mu (vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl); +vuint16mf4_t __riscv_vandn_mu (vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, uint16_t rs1, size_t vl); +vuint16mf2_t __riscv_vandn_mu (vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl); +vuint16mf2_t __riscv_vandn_mu (vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, uint16_t rs1, size_t vl); +vuint16m1_t __riscv_vandn_mu (vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl); +vuint16m1_t 
__riscv_vandn_mu (vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, uint16_t rs1, size_t vl); +vuint16m2_t __riscv_vandn_mu (vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl); +vuint16m2_t __riscv_vandn_mu (vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, uint16_t rs1, size_t vl); +vuint16m4_t __riscv_vandn_mu (vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl); +vuint16m4_t __riscv_vandn_mu (vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, uint16_t rs1, size_t vl); +vuint16m8_t __riscv_vandn_mu (vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl); +vuint16m8_t __riscv_vandn_mu (vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, uint16_t rs1, size_t vl); +vuint32mf2_t __riscv_vandn_mu (vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl); +vuint32mf2_t __riscv_vandn_mu (vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, uint32_t rs1, size_t vl); +vuint32m1_t __riscv_vandn_mu (vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl); +vuint32m1_t __riscv_vandn_mu (vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, uint32_t rs1, size_t vl); +vuint32m2_t __riscv_vandn_mu (vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl); +vuint32m2_t __riscv_vandn_mu (vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, uint32_t rs1, size_t vl); +vuint32m4_t __riscv_vandn_mu (vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl); +vuint32m4_t __riscv_vandn_mu (vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, uint32_t rs1, size_t vl); +vuint32m8_t __riscv_vandn_mu (vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl); +vuint32m8_t __riscv_vandn_mu (vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, uint32_t rs1, size_t vl); +vuint64m1_t __riscv_vandn_mu (vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl); +vuint64m1_t __riscv_vandn_mu (vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, uint64_t rs1, size_t vl); +vuint64m2_t __riscv_vandn_mu (vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl); +vuint64m2_t __riscv_vandn_mu (vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, uint64_t rs1, size_t vl); +vuint64m4_t __riscv_vandn_mu (vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl); +vuint64m4_t __riscv_vandn_mu (vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, uint64_t rs1, size_t vl); +vuint64m8_t __riscv_vandn_mu (vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl); +vuint64m8_t __riscv_vandn_mu (vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, uint64_t rs1, size_t vl); +---- + +[[policy-variant-overloaded]] +==== Vector Basic Bit-manipulation - Reverse + +[,c] +---- +vuint8mf8_t __riscv_vbrev_tu (vuint8mf8_t vd, vuint8mf8_t vs2, size_t vl); +vuint8mf4_t __riscv_vbrev_tu (vuint8mf4_t vd, vuint8mf4_t vs2, size_t vl); +vuint8mf2_t __riscv_vbrev_tu (vuint8mf2_t vd, vuint8mf2_t vs2, size_t vl); +vuint8m1_t __riscv_vbrev_tu (vuint8m1_t vd, vuint8m1_t vs2, size_t vl); +vuint8m2_t __riscv_vbrev_tu (vuint8m2_t vd, vuint8m2_t vs2, size_t vl); +vuint8m4_t __riscv_vbrev_tu (vuint8m4_t vd, vuint8m4_t vs2, size_t vl); +vuint8m8_t __riscv_vbrev_tu (vuint8m8_t vd, vuint8m8_t vs2, size_t vl); +vuint16mf4_t __riscv_vbrev_tu (vuint16mf4_t vd, vuint16mf4_t vs2, size_t vl); +vuint16mf2_t __riscv_vbrev_tu (vuint16mf2_t vd, vuint16mf2_t vs2, size_t vl); +vuint16m1_t __riscv_vbrev_tu (vuint16m1_t vd, vuint16m1_t vs2, size_t vl); +vuint16m2_t __riscv_vbrev_tu (vuint16m2_t vd, vuint16m2_t vs2, size_t vl); 
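+// -- Illustrative usage sketch (editorial example, not part of the generated listing) --
+// Assumes the overloaded intrinsics above are exposed through <riscv_vector.h> (the
+// header name is an assumption) on a toolchain with the Zvbb extension enabled.
+// Under the _tu (tail-undisturbed) policy, elements [0, vl) of the result hold the
+// bit-reversed elements of vs2, while elements at indices >= vl are copied from vd.
+static inline vuint8m1_t example_vbrev_prefix(vuint8m1_t vd, vuint8m1_t vs2, size_t vl) {
+  return __riscv_vbrev_tu(vd, vs2, vl); // the same overloaded name resolves for every SEW/LMUL listed here
+}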
+vuint16m4_t __riscv_vbrev_tu (vuint16m4_t vd, vuint16m4_t vs2, size_t vl); +vuint16m8_t __riscv_vbrev_tu (vuint16m8_t vd, vuint16m8_t vs2, size_t vl); +vuint32mf2_t __riscv_vbrev_tu (vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vbrev_tu (vuint32m1_t vd, vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vbrev_tu (vuint32m2_t vd, vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vbrev_tu (vuint32m4_t vd, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vbrev_tu (vuint32m8_t vd, vuint32m8_t vs2, size_t vl); +vuint64m1_t __riscv_vbrev_tu (vuint64m1_t vd, vuint64m1_t vs2, size_t vl); +vuint64m2_t __riscv_vbrev_tu (vuint64m2_t vd, vuint64m2_t vs2, size_t vl); +vuint64m4_t __riscv_vbrev_tu (vuint64m4_t vd, vuint64m4_t vs2, size_t vl); +vuint64m8_t __riscv_vbrev_tu (vuint64m8_t vd, vuint64m8_t vs2, size_t vl); +vuint8mf8_t __riscv_vbrev8_tu (vuint8mf8_t vd, vuint8mf8_t vs2, size_t vl); +vuint8mf4_t __riscv_vbrev8_tu (vuint8mf4_t vd, vuint8mf4_t vs2, size_t vl); +vuint8mf2_t __riscv_vbrev8_tu (vuint8mf2_t vd, vuint8mf2_t vs2, size_t vl); +vuint8m1_t __riscv_vbrev8_tu (vuint8m1_t vd, vuint8m1_t vs2, size_t vl); +vuint8m2_t __riscv_vbrev8_tu (vuint8m2_t vd, vuint8m2_t vs2, size_t vl); +vuint8m4_t __riscv_vbrev8_tu (vuint8m4_t vd, vuint8m4_t vs2, size_t vl); +vuint8m8_t __riscv_vbrev8_tu (vuint8m8_t vd, vuint8m8_t vs2, size_t vl); +vuint16mf4_t __riscv_vbrev8_tu (vuint16mf4_t vd, vuint16mf4_t vs2, size_t vl); +vuint16mf2_t __riscv_vbrev8_tu (vuint16mf2_t vd, vuint16mf2_t vs2, size_t vl); +vuint16m1_t __riscv_vbrev8_tu (vuint16m1_t vd, vuint16m1_t vs2, size_t vl); +vuint16m2_t __riscv_vbrev8_tu (vuint16m2_t vd, vuint16m2_t vs2, size_t vl); +vuint16m4_t __riscv_vbrev8_tu (vuint16m4_t vd, vuint16m4_t vs2, size_t vl); +vuint16m8_t __riscv_vbrev8_tu (vuint16m8_t vd, vuint16m8_t vs2, size_t vl); +vuint32mf2_t __riscv_vbrev8_tu (vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vbrev8_tu (vuint32m1_t vd, vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vbrev8_tu (vuint32m2_t vd, vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vbrev8_tu (vuint32m4_t vd, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vbrev8_tu (vuint32m8_t vd, vuint32m8_t vs2, size_t vl); +vuint64m1_t __riscv_vbrev8_tu (vuint64m1_t vd, vuint64m1_t vs2, size_t vl); +vuint64m2_t __riscv_vbrev8_tu (vuint64m2_t vd, vuint64m2_t vs2, size_t vl); +vuint64m4_t __riscv_vbrev8_tu (vuint64m4_t vd, vuint64m4_t vs2, size_t vl); +vuint64m8_t __riscv_vbrev8_tu (vuint64m8_t vd, vuint64m8_t vs2, size_t vl); +vuint8mf8_t __riscv_vrev8_tu (vuint8mf8_t vd, vuint8mf8_t vs2, size_t vl); +vuint8mf4_t __riscv_vrev8_tu (vuint8mf4_t vd, vuint8mf4_t vs2, size_t vl); +vuint8mf2_t __riscv_vrev8_tu (vuint8mf2_t vd, vuint8mf2_t vs2, size_t vl); +vuint8m1_t __riscv_vrev8_tu (vuint8m1_t vd, vuint8m1_t vs2, size_t vl); +vuint8m2_t __riscv_vrev8_tu (vuint8m2_t vd, vuint8m2_t vs2, size_t vl); +vuint8m4_t __riscv_vrev8_tu (vuint8m4_t vd, vuint8m4_t vs2, size_t vl); +vuint8m8_t __riscv_vrev8_tu (vuint8m8_t vd, vuint8m8_t vs2, size_t vl); +vuint16mf4_t __riscv_vrev8_tu (vuint16mf4_t vd, vuint16mf4_t vs2, size_t vl); +vuint16mf2_t __riscv_vrev8_tu (vuint16mf2_t vd, vuint16mf2_t vs2, size_t vl); +vuint16m1_t __riscv_vrev8_tu (vuint16m1_t vd, vuint16m1_t vs2, size_t vl); +vuint16m2_t __riscv_vrev8_tu (vuint16m2_t vd, vuint16m2_t vs2, size_t vl); +vuint16m4_t __riscv_vrev8_tu (vuint16m4_t vd, vuint16m4_t vs2, size_t vl); +vuint16m8_t __riscv_vrev8_tu (vuint16m8_t vd, vuint16m8_t vs2, size_t vl); +vuint32mf2_t __riscv_vrev8_tu 
(vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vrev8_tu (vuint32m1_t vd, vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vrev8_tu (vuint32m2_t vd, vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vrev8_tu (vuint32m4_t vd, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vrev8_tu (vuint32m8_t vd, vuint32m8_t vs2, size_t vl); +vuint64m1_t __riscv_vrev8_tu (vuint64m1_t vd, vuint64m1_t vs2, size_t vl); +vuint64m2_t __riscv_vrev8_tu (vuint64m2_t vd, vuint64m2_t vs2, size_t vl); +vuint64m4_t __riscv_vrev8_tu (vuint64m4_t vd, vuint64m4_t vs2, size_t vl); +vuint64m8_t __riscv_vrev8_tu (vuint64m8_t vd, vuint64m8_t vs2, size_t vl); +// masked functions +vuint8mf8_t __riscv_vbrev_tum (vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, size_t vl); +vuint8mf4_t __riscv_vbrev_tum (vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, size_t vl); +vuint8mf2_t __riscv_vbrev_tum (vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, size_t vl); +vuint8m1_t __riscv_vbrev_tum (vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, size_t vl); +vuint8m2_t __riscv_vbrev_tum (vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, size_t vl); +vuint8m4_t __riscv_vbrev_tum (vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, size_t vl); +vuint8m8_t __riscv_vbrev_tum (vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, size_t vl); +vuint16mf4_t __riscv_vbrev_tum (vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, size_t vl); +vuint16mf2_t __riscv_vbrev_tum (vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, size_t vl); +vuint16m1_t __riscv_vbrev_tum (vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, size_t vl); +vuint16m2_t __riscv_vbrev_tum (vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, size_t vl); +vuint16m4_t __riscv_vbrev_tum (vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, size_t vl); +vuint16m8_t __riscv_vbrev_tum (vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, size_t vl); +vuint32mf2_t __riscv_vbrev_tum (vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vbrev_tum (vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vbrev_tum (vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vbrev_tum (vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vbrev_tum (vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, size_t vl); +vuint64m1_t __riscv_vbrev_tum (vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, size_t vl); +vuint64m2_t __riscv_vbrev_tum (vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, size_t vl); +vuint64m4_t __riscv_vbrev_tum (vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, size_t vl); +vuint64m8_t __riscv_vbrev_tum (vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, size_t vl); +vuint8mf8_t __riscv_vbrev8_tum (vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, size_t vl); +vuint8mf4_t __riscv_vbrev8_tum (vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, size_t vl); +vuint8mf2_t __riscv_vbrev8_tum (vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, size_t vl); +vuint8m1_t __riscv_vbrev8_tum (vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, size_t vl); +vuint8m2_t __riscv_vbrev8_tum (vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, size_t vl); +vuint8m4_t __riscv_vbrev8_tum (vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, size_t vl); +vuint8m8_t __riscv_vbrev8_tum (vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, size_t vl); +vuint16mf4_t __riscv_vbrev8_tum (vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, size_t vl); +vuint16mf2_t __riscv_vbrev8_tum (vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, size_t vl); +vuint16m1_t __riscv_vbrev8_tum (vbool16_t vm, vuint16m1_t vd, 
vuint16m1_t vs2, size_t vl); +vuint16m2_t __riscv_vbrev8_tum (vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, size_t vl); +vuint16m4_t __riscv_vbrev8_tum (vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, size_t vl); +vuint16m8_t __riscv_vbrev8_tum (vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, size_t vl); +vuint32mf2_t __riscv_vbrev8_tum (vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vbrev8_tum (vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vbrev8_tum (vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vbrev8_tum (vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vbrev8_tum (vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, size_t vl); +vuint64m1_t __riscv_vbrev8_tum (vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, size_t vl); +vuint64m2_t __riscv_vbrev8_tum (vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, size_t vl); +vuint64m4_t __riscv_vbrev8_tum (vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, size_t vl); +vuint64m8_t __riscv_vbrev8_tum (vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, size_t vl); +vuint8mf8_t __riscv_vrev8_tum (vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, size_t vl); +vuint8mf4_t __riscv_vrev8_tum (vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, size_t vl); +vuint8mf2_t __riscv_vrev8_tum (vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, size_t vl); +vuint8m1_t __riscv_vrev8_tum (vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, size_t vl); +vuint8m2_t __riscv_vrev8_tum (vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, size_t vl); +vuint8m4_t __riscv_vrev8_tum (vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, size_t vl); +vuint8m8_t __riscv_vrev8_tum (vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, size_t vl); +vuint16mf4_t __riscv_vrev8_tum (vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, size_t vl); +vuint16mf2_t __riscv_vrev8_tum (vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, size_t vl); +vuint16m1_t __riscv_vrev8_tum (vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, size_t vl); +vuint16m2_t __riscv_vrev8_tum (vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, size_t vl); +vuint16m4_t __riscv_vrev8_tum (vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, size_t vl); +vuint16m8_t __riscv_vrev8_tum (vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, size_t vl); +vuint32mf2_t __riscv_vrev8_tum (vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vrev8_tum (vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vrev8_tum (vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vrev8_tum (vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vrev8_tum (vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, size_t vl); +vuint64m1_t __riscv_vrev8_tum (vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, size_t vl); +vuint64m2_t __riscv_vrev8_tum (vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, size_t vl); +vuint64m4_t __riscv_vrev8_tum (vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, size_t vl); +vuint64m8_t __riscv_vrev8_tum (vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, size_t vl); +// masked functions +vuint8mf8_t __riscv_vbrev_tumu (vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, size_t vl); +vuint8mf4_t __riscv_vbrev_tumu (vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, size_t vl); +vuint8mf2_t __riscv_vbrev_tumu (vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, size_t vl); +vuint8m1_t __riscv_vbrev_tumu (vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, size_t vl); +vuint8m2_t __riscv_vbrev_tumu (vbool4_t vm, 
vuint8m2_t vd, vuint8m2_t vs2, size_t vl); +vuint8m4_t __riscv_vbrev_tumu (vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, size_t vl); +vuint8m8_t __riscv_vbrev_tumu (vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, size_t vl); +vuint16mf4_t __riscv_vbrev_tumu (vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, size_t vl); +vuint16mf2_t __riscv_vbrev_tumu (vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, size_t vl); +vuint16m1_t __riscv_vbrev_tumu (vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, size_t vl); +vuint16m2_t __riscv_vbrev_tumu (vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, size_t vl); +vuint16m4_t __riscv_vbrev_tumu (vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, size_t vl); +vuint16m8_t __riscv_vbrev_tumu (vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, size_t vl); +vuint32mf2_t __riscv_vbrev_tumu (vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vbrev_tumu (vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vbrev_tumu (vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vbrev_tumu (vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vbrev_tumu (vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, size_t vl); +vuint64m1_t __riscv_vbrev_tumu (vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, size_t vl); +vuint64m2_t __riscv_vbrev_tumu (vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, size_t vl); +vuint64m4_t __riscv_vbrev_tumu (vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, size_t vl); +vuint64m8_t __riscv_vbrev_tumu (vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, size_t vl); +vuint8mf8_t __riscv_vbrev8_tumu (vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, size_t vl); +vuint8mf4_t __riscv_vbrev8_tumu (vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, size_t vl); +vuint8mf2_t __riscv_vbrev8_tumu (vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, size_t vl); +vuint8m1_t __riscv_vbrev8_tumu (vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, size_t vl); +vuint8m2_t __riscv_vbrev8_tumu (vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, size_t vl); +vuint8m4_t __riscv_vbrev8_tumu (vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, size_t vl); +vuint8m8_t __riscv_vbrev8_tumu (vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, size_t vl); +vuint16mf4_t __riscv_vbrev8_tumu (vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, size_t vl); +vuint16mf2_t __riscv_vbrev8_tumu (vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, size_t vl); +vuint16m1_t __riscv_vbrev8_tumu (vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, size_t vl); +vuint16m2_t __riscv_vbrev8_tumu (vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, size_t vl); +vuint16m4_t __riscv_vbrev8_tumu (vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, size_t vl); +vuint16m8_t __riscv_vbrev8_tumu (vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, size_t vl); +vuint32mf2_t __riscv_vbrev8_tumu (vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vbrev8_tumu (vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vbrev8_tumu (vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vbrev8_tumu (vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vbrev8_tumu (vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, size_t vl); +vuint64m1_t __riscv_vbrev8_tumu (vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, size_t vl); +vuint64m2_t __riscv_vbrev8_tumu (vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, size_t vl); +vuint64m4_t __riscv_vbrev8_tumu (vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, size_t vl); 
+vuint64m8_t __riscv_vbrev8_tumu (vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, size_t vl); +vuint8mf8_t __riscv_vrev8_tumu (vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, size_t vl); +vuint8mf4_t __riscv_vrev8_tumu (vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, size_t vl); +vuint8mf2_t __riscv_vrev8_tumu (vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, size_t vl); +vuint8m1_t __riscv_vrev8_tumu (vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, size_t vl); +vuint8m2_t __riscv_vrev8_tumu (vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, size_t vl); +vuint8m4_t __riscv_vrev8_tumu (vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, size_t vl); +vuint8m8_t __riscv_vrev8_tumu (vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, size_t vl); +vuint16mf4_t __riscv_vrev8_tumu (vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, size_t vl); +vuint16mf2_t __riscv_vrev8_tumu (vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, size_t vl); +vuint16m1_t __riscv_vrev8_tumu (vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, size_t vl); +vuint16m2_t __riscv_vrev8_tumu (vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, size_t vl); +vuint16m4_t __riscv_vrev8_tumu (vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, size_t vl); +vuint16m8_t __riscv_vrev8_tumu (vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, size_t vl); +vuint32mf2_t __riscv_vrev8_tumu (vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vrev8_tumu (vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vrev8_tumu (vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vrev8_tumu (vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vrev8_tumu (vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, size_t vl); +vuint64m1_t __riscv_vrev8_tumu (vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, size_t vl); +vuint64m2_t __riscv_vrev8_tumu (vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, size_t vl); +vuint64m4_t __riscv_vrev8_tumu (vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, size_t vl); +vuint64m8_t __riscv_vrev8_tumu (vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, size_t vl); +// masked functions +vuint8mf8_t __riscv_vbrev_mu (vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, size_t vl); +vuint8mf4_t __riscv_vbrev_mu (vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, size_t vl); +vuint8mf2_t __riscv_vbrev_mu (vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, size_t vl); +vuint8m1_t __riscv_vbrev_mu (vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, size_t vl); +vuint8m2_t __riscv_vbrev_mu (vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, size_t vl); +vuint8m4_t __riscv_vbrev_mu (vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, size_t vl); +vuint8m8_t __riscv_vbrev_mu (vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, size_t vl); +vuint16mf4_t __riscv_vbrev_mu (vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, size_t vl); +vuint16mf2_t __riscv_vbrev_mu (vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, size_t vl); +vuint16m1_t __riscv_vbrev_mu (vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, size_t vl); +vuint16m2_t __riscv_vbrev_mu (vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, size_t vl); +vuint16m4_t __riscv_vbrev_mu (vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, size_t vl); +vuint16m8_t __riscv_vbrev_mu (vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, size_t vl); +vuint32mf2_t __riscv_vbrev_mu (vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vbrev_mu (vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vbrev_mu (vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, size_t vl); 
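+// -- Illustrative usage sketch (editorial example, not part of the generated listing) --
+// A masked call under the _mu (mask-undisturbed) policy: active elements (mask bit set)
+// receive the bit-reversed value of vs2, inactive elements keep their previous value
+// from vd. Same header/extension assumptions as the sketch above.
+static inline vuint32m1_t example_vbrev_masked(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, size_t vl) {
+  return __riscv_vbrev_mu(vm, vd, vs2, vl);
+}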
+vuint32m4_t __riscv_vbrev_mu (vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vbrev_mu (vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, size_t vl); +vuint64m1_t __riscv_vbrev_mu (vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, size_t vl); +vuint64m2_t __riscv_vbrev_mu (vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, size_t vl); +vuint64m4_t __riscv_vbrev_mu (vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, size_t vl); +vuint64m8_t __riscv_vbrev_mu (vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, size_t vl); +vuint8mf8_t __riscv_vbrev8_mu (vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, size_t vl); +vuint8mf4_t __riscv_vbrev8_mu (vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, size_t vl); +vuint8mf2_t __riscv_vbrev8_mu (vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, size_t vl); +vuint8m1_t __riscv_vbrev8_mu (vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, size_t vl); +vuint8m2_t __riscv_vbrev8_mu (vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, size_t vl); +vuint8m4_t __riscv_vbrev8_mu (vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, size_t vl); +vuint8m8_t __riscv_vbrev8_mu (vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, size_t vl); +vuint16mf4_t __riscv_vbrev8_mu (vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, size_t vl); +vuint16mf2_t __riscv_vbrev8_mu (vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, size_t vl); +vuint16m1_t __riscv_vbrev8_mu (vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, size_t vl); +vuint16m2_t __riscv_vbrev8_mu (vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, size_t vl); +vuint16m4_t __riscv_vbrev8_mu (vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, size_t vl); +vuint16m8_t __riscv_vbrev8_mu (vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, size_t vl); +vuint32mf2_t __riscv_vbrev8_mu (vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vbrev8_mu (vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vbrev8_mu (vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vbrev8_mu (vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vbrev8_mu (vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, size_t vl); +vuint64m1_t __riscv_vbrev8_mu (vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, size_t vl); +vuint64m2_t __riscv_vbrev8_mu (vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, size_t vl); +vuint64m4_t __riscv_vbrev8_mu (vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, size_t vl); +vuint64m8_t __riscv_vbrev8_mu (vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, size_t vl); +vuint8mf8_t __riscv_vrev8_mu (vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, size_t vl); +vuint8mf4_t __riscv_vrev8_mu (vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, size_t vl); +vuint8mf2_t __riscv_vrev8_mu (vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, size_t vl); +vuint8m1_t __riscv_vrev8_mu (vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, size_t vl); +vuint8m2_t __riscv_vrev8_mu (vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, size_t vl); +vuint8m4_t __riscv_vrev8_mu (vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, size_t vl); +vuint8m8_t __riscv_vrev8_mu (vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, size_t vl); +vuint16mf4_t __riscv_vrev8_mu (vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, size_t vl); +vuint16mf2_t __riscv_vrev8_mu (vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, size_t vl); +vuint16m1_t __riscv_vrev8_mu (vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, size_t vl); +vuint16m2_t __riscv_vrev8_mu (vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, size_t vl); +vuint16m4_t __riscv_vrev8_mu (vbool4_t vm, 
vuint16m4_t vd, vuint16m4_t vs2, size_t vl); +vuint16m8_t __riscv_vrev8_mu (vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, size_t vl); +vuint32mf2_t __riscv_vrev8_mu (vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vrev8_mu (vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vrev8_mu (vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vrev8_mu (vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vrev8_mu (vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, size_t vl); +vuint64m1_t __riscv_vrev8_mu (vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, size_t vl); +vuint64m2_t __riscv_vrev8_mu (vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, size_t vl); +vuint64m4_t __riscv_vrev8_mu (vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, size_t vl); +vuint64m8_t __riscv_vrev8_mu (vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, size_t vl); +---- + +[[policy-variant-overloaded]] +==== Vector Basic Bit-manipulation - Count Bits + +[,c] +---- +vuint8mf8_t __riscv_vclz_tu (vuint8mf8_t vd, vuint8mf8_t vs2, size_t vl); +vuint8mf4_t __riscv_vclz_tu (vuint8mf4_t vd, vuint8mf4_t vs2, size_t vl); +vuint8mf2_t __riscv_vclz_tu (vuint8mf2_t vd, vuint8mf2_t vs2, size_t vl); +vuint8m1_t __riscv_vclz_tu (vuint8m1_t vd, vuint8m1_t vs2, size_t vl); +vuint8m2_t __riscv_vclz_tu (vuint8m2_t vd, vuint8m2_t vs2, size_t vl); +vuint8m4_t __riscv_vclz_tu (vuint8m4_t vd, vuint8m4_t vs2, size_t vl); +vuint8m8_t __riscv_vclz_tu (vuint8m8_t vd, vuint8m8_t vs2, size_t vl); +vuint16mf4_t __riscv_vclz_tu (vuint16mf4_t vd, vuint16mf4_t vs2, size_t vl); +vuint16mf2_t __riscv_vclz_tu (vuint16mf2_t vd, vuint16mf2_t vs2, size_t vl); +vuint16m1_t __riscv_vclz_tu (vuint16m1_t vd, vuint16m1_t vs2, size_t vl); +vuint16m2_t __riscv_vclz_tu (vuint16m2_t vd, vuint16m2_t vs2, size_t vl); +vuint16m4_t __riscv_vclz_tu (vuint16m4_t vd, vuint16m4_t vs2, size_t vl); +vuint16m8_t __riscv_vclz_tu (vuint16m8_t vd, vuint16m8_t vs2, size_t vl); +vuint32mf2_t __riscv_vclz_tu (vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vclz_tu (vuint32m1_t vd, vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vclz_tu (vuint32m2_t vd, vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vclz_tu (vuint32m4_t vd, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vclz_tu (vuint32m8_t vd, vuint32m8_t vs2, size_t vl); +vuint64m1_t __riscv_vclz_tu (vuint64m1_t vd, vuint64m1_t vs2, size_t vl); +vuint64m2_t __riscv_vclz_tu (vuint64m2_t vd, vuint64m2_t vs2, size_t vl); +vuint64m4_t __riscv_vclz_tu (vuint64m4_t vd, vuint64m4_t vs2, size_t vl); +vuint64m8_t __riscv_vclz_tu (vuint64m8_t vd, vuint64m8_t vs2, size_t vl); +vuint8mf8_t __riscv_vctz_tu (vuint8mf8_t vd, vuint8mf8_t vs2, size_t vl); +vuint8mf4_t __riscv_vctz_tu (vuint8mf4_t vd, vuint8mf4_t vs2, size_t vl); +vuint8mf2_t __riscv_vctz_tu (vuint8mf2_t vd, vuint8mf2_t vs2, size_t vl); +vuint8m1_t __riscv_vctz_tu (vuint8m1_t vd, vuint8m1_t vs2, size_t vl); +vuint8m2_t __riscv_vctz_tu (vuint8m2_t vd, vuint8m2_t vs2, size_t vl); +vuint8m4_t __riscv_vctz_tu (vuint8m4_t vd, vuint8m4_t vs2, size_t vl); +vuint8m8_t __riscv_vctz_tu (vuint8m8_t vd, vuint8m8_t vs2, size_t vl); +vuint16mf4_t __riscv_vctz_tu (vuint16mf4_t vd, vuint16mf4_t vs2, size_t vl); +vuint16mf2_t __riscv_vctz_tu (vuint16mf2_t vd, vuint16mf2_t vs2, size_t vl); +vuint16m1_t __riscv_vctz_tu (vuint16m1_t vd, vuint16m1_t vs2, size_t vl); +vuint16m2_t __riscv_vctz_tu (vuint16m2_t vd, vuint16m2_t vs2, size_t vl); +vuint16m4_t __riscv_vctz_tu (vuint16m4_t 
vd, vuint16m4_t vs2, size_t vl); +vuint16m8_t __riscv_vctz_tu (vuint16m8_t vd, vuint16m8_t vs2, size_t vl); +vuint32mf2_t __riscv_vctz_tu (vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vctz_tu (vuint32m1_t vd, vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vctz_tu (vuint32m2_t vd, vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vctz_tu (vuint32m4_t vd, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vctz_tu (vuint32m8_t vd, vuint32m8_t vs2, size_t vl); +vuint64m1_t __riscv_vctz_tu (vuint64m1_t vd, vuint64m1_t vs2, size_t vl); +vuint64m2_t __riscv_vctz_tu (vuint64m2_t vd, vuint64m2_t vs2, size_t vl); +vuint64m4_t __riscv_vctz_tu (vuint64m4_t vd, vuint64m4_t vs2, size_t vl); +vuint64m8_t __riscv_vctz_tu (vuint64m8_t vd, vuint64m8_t vs2, size_t vl); +// masked functions +vuint8mf8_t __riscv_vclz_tum (vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, size_t vl); +vuint8mf4_t __riscv_vclz_tum (vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, size_t vl); +vuint8mf2_t __riscv_vclz_tum (vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, size_t vl); +vuint8m1_t __riscv_vclz_tum (vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, size_t vl); +vuint8m2_t __riscv_vclz_tum (vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, size_t vl); +vuint8m4_t __riscv_vclz_tum (vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, size_t vl); +vuint8m8_t __riscv_vclz_tum (vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, size_t vl); +vuint16mf4_t __riscv_vclz_tum (vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, size_t vl); +vuint16mf2_t __riscv_vclz_tum (vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, size_t vl); +vuint16m1_t __riscv_vclz_tum (vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, size_t vl); +vuint16m2_t __riscv_vclz_tum (vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, size_t vl); +vuint16m4_t __riscv_vclz_tum (vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, size_t vl); +vuint16m8_t __riscv_vclz_tum (vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, size_t vl); +vuint32mf2_t __riscv_vclz_tum (vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vclz_tum (vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vclz_tum (vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vclz_tum (vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vclz_tum (vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, size_t vl); +vuint64m1_t __riscv_vclz_tum (vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, size_t vl); +vuint64m2_t __riscv_vclz_tum (vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, size_t vl); +vuint64m4_t __riscv_vclz_tum (vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, size_t vl); +vuint64m8_t __riscv_vclz_tum (vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, size_t vl); +vuint8mf8_t __riscv_vctz_tum (vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, size_t vl); +vuint8mf4_t __riscv_vctz_tum (vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, size_t vl); +vuint8mf2_t __riscv_vctz_tum (vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, size_t vl); +vuint8m1_t __riscv_vctz_tum (vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, size_t vl); +vuint8m2_t __riscv_vctz_tum (vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, size_t vl); +vuint8m4_t __riscv_vctz_tum (vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, size_t vl); +vuint8m8_t __riscv_vctz_tum (vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, size_t vl); +vuint16mf4_t __riscv_vctz_tum (vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, size_t vl); +vuint16mf2_t __riscv_vctz_tum (vbool32_t vm, vuint16mf2_t vd, 
vuint16mf2_t vs2, size_t vl); +vuint16m1_t __riscv_vctz_tum (vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, size_t vl); +vuint16m2_t __riscv_vctz_tum (vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, size_t vl); +vuint16m4_t __riscv_vctz_tum (vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, size_t vl); +vuint16m8_t __riscv_vctz_tum (vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, size_t vl); +vuint32mf2_t __riscv_vctz_tum (vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vctz_tum (vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vctz_tum (vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vctz_tum (vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vctz_tum (vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, size_t vl); +vuint64m1_t __riscv_vctz_tum (vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, size_t vl); +vuint64m2_t __riscv_vctz_tum (vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, size_t vl); +vuint64m4_t __riscv_vctz_tum (vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, size_t vl); +vuint64m8_t __riscv_vctz_tum (vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, size_t vl); +// masked functions +vuint8mf8_t __riscv_vclz_tumu (vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, size_t vl); +vuint8mf4_t __riscv_vclz_tumu (vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, size_t vl); +vuint8mf2_t __riscv_vclz_tumu (vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, size_t vl); +vuint8m1_t __riscv_vclz_tumu (vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, size_t vl); +vuint8m2_t __riscv_vclz_tumu (vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, size_t vl); +vuint8m4_t __riscv_vclz_tumu (vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, size_t vl); +vuint8m8_t __riscv_vclz_tumu (vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, size_t vl); +vuint16mf4_t __riscv_vclz_tumu (vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, size_t vl); +vuint16mf2_t __riscv_vclz_tumu (vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, size_t vl); +vuint16m1_t __riscv_vclz_tumu (vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, size_t vl); +vuint16m2_t __riscv_vclz_tumu (vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, size_t vl); +vuint16m4_t __riscv_vclz_tumu (vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, size_t vl); +vuint16m8_t __riscv_vclz_tumu (vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, size_t vl); +vuint32mf2_t __riscv_vclz_tumu (vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vclz_tumu (vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vclz_tumu (vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vclz_tumu (vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vclz_tumu (vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, size_t vl); +vuint64m1_t __riscv_vclz_tumu (vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, size_t vl); +vuint64m2_t __riscv_vclz_tumu (vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, size_t vl); +vuint64m4_t __riscv_vclz_tumu (vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, size_t vl); +vuint64m8_t __riscv_vclz_tumu (vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, size_t vl); +vuint8mf8_t __riscv_vctz_tumu (vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, size_t vl); +vuint8mf4_t __riscv_vctz_tumu (vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, size_t vl); +vuint8mf2_t __riscv_vctz_tumu (vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, size_t vl); +vuint8m1_t __riscv_vctz_tumu (vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, 
size_t vl); +vuint8m2_t __riscv_vctz_tumu (vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, size_t vl); +vuint8m4_t __riscv_vctz_tumu (vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, size_t vl); +vuint8m8_t __riscv_vctz_tumu (vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, size_t vl); +vuint16mf4_t __riscv_vctz_tumu (vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, size_t vl); +vuint16mf2_t __riscv_vctz_tumu (vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, size_t vl); +vuint16m1_t __riscv_vctz_tumu (vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, size_t vl); +vuint16m2_t __riscv_vctz_tumu (vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, size_t vl); +vuint16m4_t __riscv_vctz_tumu (vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, size_t vl); +vuint16m8_t __riscv_vctz_tumu (vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, size_t vl); +vuint32mf2_t __riscv_vctz_tumu (vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vctz_tumu (vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vctz_tumu (vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vctz_tumu (vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vctz_tumu (vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, size_t vl); +vuint64m1_t __riscv_vctz_tumu (vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, size_t vl); +vuint64m2_t __riscv_vctz_tumu (vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, size_t vl); +vuint64m4_t __riscv_vctz_tumu (vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, size_t vl); +vuint64m8_t __riscv_vctz_tumu (vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, size_t vl); +// masked functions +vuint8mf8_t __riscv_vclz_mu (vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, size_t vl); +vuint8mf4_t __riscv_vclz_mu (vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, size_t vl); +vuint8mf2_t __riscv_vclz_mu (vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, size_t vl); +vuint8m1_t __riscv_vclz_mu (vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, size_t vl); +vuint8m2_t __riscv_vclz_mu (vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, size_t vl); +vuint8m4_t __riscv_vclz_mu (vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, size_t vl); +vuint8m8_t __riscv_vclz_mu (vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, size_t vl); +vuint16mf4_t __riscv_vclz_mu (vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, size_t vl); +vuint16mf2_t __riscv_vclz_mu (vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, size_t vl); +vuint16m1_t __riscv_vclz_mu (vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, size_t vl); +vuint16m2_t __riscv_vclz_mu (vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, size_t vl); +vuint16m4_t __riscv_vclz_mu (vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, size_t vl); +vuint16m8_t __riscv_vclz_mu (vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, size_t vl); +vuint32mf2_t __riscv_vclz_mu (vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vclz_mu (vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vclz_mu (vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vclz_mu (vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vclz_mu (vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, size_t vl); +vuint64m1_t __riscv_vclz_mu (vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, size_t vl); +vuint64m2_t __riscv_vclz_mu (vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, size_t vl); +vuint64m4_t __riscv_vclz_mu (vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, size_t vl); +vuint64m8_t __riscv_vclz_mu 
(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, size_t vl); +vuint8mf8_t __riscv_vctz_mu (vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, size_t vl); +vuint8mf4_t __riscv_vctz_mu (vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, size_t vl); +vuint8mf2_t __riscv_vctz_mu (vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, size_t vl); +vuint8m1_t __riscv_vctz_mu (vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, size_t vl); +vuint8m2_t __riscv_vctz_mu (vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, size_t vl); +vuint8m4_t __riscv_vctz_mu (vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, size_t vl); +vuint8m8_t __riscv_vctz_mu (vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, size_t vl); +vuint16mf4_t __riscv_vctz_mu (vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, size_t vl); +vuint16mf2_t __riscv_vctz_mu (vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, size_t vl); +vuint16m1_t __riscv_vctz_mu (vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, size_t vl); +vuint16m2_t __riscv_vctz_mu (vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, size_t vl); +vuint16m4_t __riscv_vctz_mu (vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, size_t vl); +vuint16m8_t __riscv_vctz_mu (vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, size_t vl); +vuint32mf2_t __riscv_vctz_mu (vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vctz_mu (vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vctz_mu (vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vctz_mu (vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vctz_mu (vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, size_t vl); +vuint64m1_t __riscv_vctz_mu (vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, size_t vl); +vuint64m2_t __riscv_vctz_mu (vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, size_t vl); +vuint64m4_t __riscv_vctz_mu (vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, size_t vl); +vuint64m8_t __riscv_vctz_mu (vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, size_t vl); +---- + +[[policy-variant-overloaded]] +==== Vector Basic Bit-manipulation - Vector Population Count + +[,c] +---- +vuint8mf8_t __riscv_vcpop_tu (vuint8mf8_t vd, vuint8mf8_t vs2, size_t vl); +vuint8mf4_t __riscv_vcpop_tu (vuint8mf4_t vd, vuint8mf4_t vs2, size_t vl); +vuint8mf2_t __riscv_vcpop_tu (vuint8mf2_t vd, vuint8mf2_t vs2, size_t vl); +vuint8m1_t __riscv_vcpop_tu (vuint8m1_t vd, vuint8m1_t vs2, size_t vl); +vuint8m2_t __riscv_vcpop_tu (vuint8m2_t vd, vuint8m2_t vs2, size_t vl); +vuint8m4_t __riscv_vcpop_tu (vuint8m4_t vd, vuint8m4_t vs2, size_t vl); +vuint8m8_t __riscv_vcpop_tu (vuint8m8_t vd, vuint8m8_t vs2, size_t vl); +vuint16mf4_t __riscv_vcpop_tu (vuint16mf4_t vd, vuint16mf4_t vs2, size_t vl); +vuint16mf2_t __riscv_vcpop_tu (vuint16mf2_t vd, vuint16mf2_t vs2, size_t vl); +vuint16m1_t __riscv_vcpop_tu (vuint16m1_t vd, vuint16m1_t vs2, size_t vl); +vuint16m2_t __riscv_vcpop_tu (vuint16m2_t vd, vuint16m2_t vs2, size_t vl); +vuint16m4_t __riscv_vcpop_tu (vuint16m4_t vd, vuint16m4_t vs2, size_t vl); +vuint16m8_t __riscv_vcpop_tu (vuint16m8_t vd, vuint16m8_t vs2, size_t vl); +vuint32mf2_t __riscv_vcpop_tu (vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vcpop_tu (vuint32m1_t vd, vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vcpop_tu (vuint32m2_t vd, vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vcpop_tu (vuint32m4_t vd, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vcpop_tu (vuint32m8_t vd, vuint32m8_t vs2, size_t vl); +vuint64m1_t __riscv_vcpop_tu (vuint64m1_t vd, vuint64m1_t vs2, size_t 
vl); +vuint64m2_t __riscv_vcpop_tu (vuint64m2_t vd, vuint64m2_t vs2, size_t vl); +vuint64m4_t __riscv_vcpop_tu (vuint64m4_t vd, vuint64m4_t vs2, size_t vl); +vuint64m8_t __riscv_vcpop_tu (vuint64m8_t vd, vuint64m8_t vs2, size_t vl); +// masked functions +vuint8mf8_t __riscv_vcpop_tum (vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, size_t vl); +vuint8mf4_t __riscv_vcpop_tum (vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, size_t vl); +vuint8mf2_t __riscv_vcpop_tum (vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, size_t vl); +vuint8m1_t __riscv_vcpop_tum (vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, size_t vl); +vuint8m2_t __riscv_vcpop_tum (vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, size_t vl); +vuint8m4_t __riscv_vcpop_tum (vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, size_t vl); +vuint8m8_t __riscv_vcpop_tum (vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, size_t vl); +vuint16mf4_t __riscv_vcpop_tum (vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, size_t vl); +vuint16mf2_t __riscv_vcpop_tum (vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, size_t vl); +vuint16m1_t __riscv_vcpop_tum (vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, size_t vl); +vuint16m2_t __riscv_vcpop_tum (vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, size_t vl); +vuint16m4_t __riscv_vcpop_tum (vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, size_t vl); +vuint16m8_t __riscv_vcpop_tum (vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, size_t vl); +vuint32mf2_t __riscv_vcpop_tum (vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vcpop_tum (vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vcpop_tum (vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vcpop_tum (vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vcpop_tum (vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, size_t vl); +vuint64m1_t __riscv_vcpop_tum (vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, size_t vl); +vuint64m2_t __riscv_vcpop_tum (vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, size_t vl); +vuint64m4_t __riscv_vcpop_tum (vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, size_t vl); +vuint64m8_t __riscv_vcpop_tum (vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, size_t vl); +// masked functions +vuint8mf8_t __riscv_vcpop_tumu (vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, size_t vl); +vuint8mf4_t __riscv_vcpop_tumu (vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, size_t vl); +vuint8mf2_t __riscv_vcpop_tumu (vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, size_t vl); +vuint8m1_t __riscv_vcpop_tumu (vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, size_t vl); +vuint8m2_t __riscv_vcpop_tumu (vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, size_t vl); +vuint8m4_t __riscv_vcpop_tumu (vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, size_t vl); +vuint8m8_t __riscv_vcpop_tumu (vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, size_t vl); +vuint16mf4_t __riscv_vcpop_tumu (vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, size_t vl); +vuint16mf2_t __riscv_vcpop_tumu (vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, size_t vl); +vuint16m1_t __riscv_vcpop_tumu (vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, size_t vl); +vuint16m2_t __riscv_vcpop_tumu (vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, size_t vl); +vuint16m4_t __riscv_vcpop_tumu (vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, size_t vl); +vuint16m8_t __riscv_vcpop_tumu (vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, size_t vl); +vuint32mf2_t __riscv_vcpop_tumu (vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m1_t 
__riscv_vcpop_tumu (vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vcpop_tumu (vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vcpop_tumu (vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vcpop_tumu (vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, size_t vl); +vuint64m1_t __riscv_vcpop_tumu (vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, size_t vl); +vuint64m2_t __riscv_vcpop_tumu (vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, size_t vl); +vuint64m4_t __riscv_vcpop_tumu (vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, size_t vl); +vuint64m8_t __riscv_vcpop_tumu (vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, size_t vl); +// masked functions +vuint8mf8_t __riscv_vcpop_mu (vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, size_t vl); +vuint8mf4_t __riscv_vcpop_mu (vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, size_t vl); +vuint8mf2_t __riscv_vcpop_mu (vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, size_t vl); +vuint8m1_t __riscv_vcpop_mu (vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, size_t vl); +vuint8m2_t __riscv_vcpop_mu (vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, size_t vl); +vuint8m4_t __riscv_vcpop_mu (vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, size_t vl); +vuint8m8_t __riscv_vcpop_mu (vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, size_t vl); +vuint16mf4_t __riscv_vcpop_mu (vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, size_t vl); +vuint16mf2_t __riscv_vcpop_mu (vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, size_t vl); +vuint16m1_t __riscv_vcpop_mu (vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, size_t vl); +vuint16m2_t __riscv_vcpop_mu (vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, size_t vl); +vuint16m4_t __riscv_vcpop_mu (vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, size_t vl); +vuint16m8_t __riscv_vcpop_mu (vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, size_t vl); +vuint32mf2_t __riscv_vcpop_mu (vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vcpop_mu (vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vcpop_mu (vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vcpop_mu (vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vcpop_mu (vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, size_t vl); +vuint64m1_t __riscv_vcpop_mu (vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, size_t vl); +vuint64m2_t __riscv_vcpop_mu (vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, size_t vl); +vuint64m4_t __riscv_vcpop_mu (vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, size_t vl); +vuint64m8_t __riscv_vcpop_mu (vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, size_t vl); +---- + +[[policy-variant-overloaded]] +==== Vector Bit-manipulation used in Cryptography - Rotate + +[,c] +---- +vuint8mf8_t __riscv_vrol_tu (vuint8mf8_t vd, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl); +vuint8mf8_t __riscv_vrol_tu (vuint8mf8_t vd, vuint8mf8_t vs2, size_t rs1, size_t vl); +vuint8mf4_t __riscv_vrol_tu (vuint8mf4_t vd, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl); +vuint8mf4_t __riscv_vrol_tu (vuint8mf4_t vd, vuint8mf4_t vs2, size_t rs1, size_t vl); +vuint8mf2_t __riscv_vrol_tu (vuint8mf2_t vd, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl); +vuint8mf2_t __riscv_vrol_tu (vuint8mf2_t vd, vuint8mf2_t vs2, size_t rs1, size_t vl); +vuint8m1_t __riscv_vrol_tu (vuint8m1_t vd, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl); +vuint8m1_t __riscv_vrol_tu (vuint8m1_t vd, vuint8m1_t vs2, size_t rs1, size_t vl); +vuint8m2_t 
__riscv_vrol_tu (vuint8m2_t vd, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl); +vuint8m2_t __riscv_vrol_tu (vuint8m2_t vd, vuint8m2_t vs2, size_t rs1, size_t vl); +vuint8m4_t __riscv_vrol_tu (vuint8m4_t vd, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl); +vuint8m4_t __riscv_vrol_tu (vuint8m4_t vd, vuint8m4_t vs2, size_t rs1, size_t vl); +vuint8m8_t __riscv_vrol_tu (vuint8m8_t vd, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl); +vuint8m8_t __riscv_vrol_tu (vuint8m8_t vd, vuint8m8_t vs2, size_t rs1, size_t vl); +vuint16mf4_t __riscv_vrol_tu (vuint16mf4_t vd, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl); +vuint16mf4_t __riscv_vrol_tu (vuint16mf4_t vd, vuint16mf4_t vs2, size_t rs1, size_t vl); +vuint16mf2_t __riscv_vrol_tu (vuint16mf2_t vd, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl); +vuint16mf2_t __riscv_vrol_tu (vuint16mf2_t vd, vuint16mf2_t vs2, size_t rs1, size_t vl); +vuint16m1_t __riscv_vrol_tu (vuint16m1_t vd, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl); +vuint16m1_t __riscv_vrol_tu (vuint16m1_t vd, vuint16m1_t vs2, size_t rs1, size_t vl); +vuint16m2_t __riscv_vrol_tu (vuint16m2_t vd, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl); +vuint16m2_t __riscv_vrol_tu (vuint16m2_t vd, vuint16m2_t vs2, size_t rs1, size_t vl); +vuint16m4_t __riscv_vrol_tu (vuint16m4_t vd, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl); +vuint16m4_t __riscv_vrol_tu (vuint16m4_t vd, vuint16m4_t vs2, size_t rs1, size_t vl); +vuint16m8_t __riscv_vrol_tu (vuint16m8_t vd, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl); +vuint16m8_t __riscv_vrol_tu (vuint16m8_t vd, vuint16m8_t vs2, size_t rs1, size_t vl); +vuint32mf2_t __riscv_vrol_tu (vuint32mf2_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl); +vuint32mf2_t __riscv_vrol_tu (vuint32mf2_t vd, vuint32mf2_t vs2, size_t rs1, size_t vl); +vuint32m1_t __riscv_vrol_tu (vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl); +vuint32m1_t __riscv_vrol_tu (vuint32m1_t vd, vuint32m1_t vs2, size_t rs1, size_t vl); +vuint32m2_t __riscv_vrol_tu (vuint32m2_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl); +vuint32m2_t __riscv_vrol_tu (vuint32m2_t vd, vuint32m2_t vs2, size_t rs1, size_t vl); +vuint32m4_t __riscv_vrol_tu (vuint32m4_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl); +vuint32m4_t __riscv_vrol_tu (vuint32m4_t vd, vuint32m4_t vs2, size_t rs1, size_t vl); +vuint32m8_t __riscv_vrol_tu (vuint32m8_t vd, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl); +vuint32m8_t __riscv_vrol_tu (vuint32m8_t vd, vuint32m8_t vs2, size_t rs1, size_t vl); +vuint64m1_t __riscv_vrol_tu (vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl); +vuint64m1_t __riscv_vrol_tu (vuint64m1_t vd, vuint64m1_t vs2, size_t rs1, size_t vl); +vuint64m2_t __riscv_vrol_tu (vuint64m2_t vd, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl); +vuint64m2_t __riscv_vrol_tu (vuint64m2_t vd, vuint64m2_t vs2, size_t rs1, size_t vl); +vuint64m4_t __riscv_vrol_tu (vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl); +vuint64m4_t __riscv_vrol_tu (vuint64m4_t vd, vuint64m4_t vs2, size_t rs1, size_t vl); +vuint64m8_t __riscv_vrol_tu (vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl); +vuint64m8_t __riscv_vrol_tu (vuint64m8_t vd, vuint64m8_t vs2, size_t rs1, size_t vl); +vuint8mf8_t __riscv_vror_tu (vuint8mf8_t vd, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl); +vuint8mf8_t __riscv_vror_tu (vuint8mf8_t vd, vuint8mf8_t vs2, size_t rs1, size_t vl); +vuint8mf4_t __riscv_vror_tu (vuint8mf4_t vd, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl); +vuint8mf4_t __riscv_vror_tu (vuint8mf4_t vd, vuint8mf4_t vs2, size_t 
rs1, size_t vl); +vuint8mf2_t __riscv_vror_tu (vuint8mf2_t vd, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl); +vuint8mf2_t __riscv_vror_tu (vuint8mf2_t vd, vuint8mf2_t vs2, size_t rs1, size_t vl); +vuint8m1_t __riscv_vror_tu (vuint8m1_t vd, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl); +vuint8m1_t __riscv_vror_tu (vuint8m1_t vd, vuint8m1_t vs2, size_t rs1, size_t vl); +vuint8m2_t __riscv_vror_tu (vuint8m2_t vd, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl); +vuint8m2_t __riscv_vror_tu (vuint8m2_t vd, vuint8m2_t vs2, size_t rs1, size_t vl); +vuint8m4_t __riscv_vror_tu (vuint8m4_t vd, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl); +vuint8m4_t __riscv_vror_tu (vuint8m4_t vd, vuint8m4_t vs2, size_t rs1, size_t vl); +vuint8m8_t __riscv_vror_tu (vuint8m8_t vd, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl); +vuint8m8_t __riscv_vror_tu (vuint8m8_t vd, vuint8m8_t vs2, size_t rs1, size_t vl); +vuint16mf4_t __riscv_vror_tu (vuint16mf4_t vd, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl); +vuint16mf4_t __riscv_vror_tu (vuint16mf4_t vd, vuint16mf4_t vs2, size_t rs1, size_t vl); +vuint16mf2_t __riscv_vror_tu (vuint16mf2_t vd, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl); +vuint16mf2_t __riscv_vror_tu (vuint16mf2_t vd, vuint16mf2_t vs2, size_t rs1, size_t vl); +vuint16m1_t __riscv_vror_tu (vuint16m1_t vd, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl); +vuint16m1_t __riscv_vror_tu (vuint16m1_t vd, vuint16m1_t vs2, size_t rs1, size_t vl); +vuint16m2_t __riscv_vror_tu (vuint16m2_t vd, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl); +vuint16m2_t __riscv_vror_tu (vuint16m2_t vd, vuint16m2_t vs2, size_t rs1, size_t vl); +vuint16m4_t __riscv_vror_tu (vuint16m4_t vd, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl); +vuint16m4_t __riscv_vror_tu (vuint16m4_t vd, vuint16m4_t vs2, size_t rs1, size_t vl); +vuint16m8_t __riscv_vror_tu (vuint16m8_t vd, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl); +vuint16m8_t __riscv_vror_tu (vuint16m8_t vd, vuint16m8_t vs2, size_t rs1, size_t vl); +vuint32mf2_t __riscv_vror_tu (vuint32mf2_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl); +vuint32mf2_t __riscv_vror_tu (vuint32mf2_t vd, vuint32mf2_t vs2, size_t rs1, size_t vl); +vuint32m1_t __riscv_vror_tu (vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl); +vuint32m1_t __riscv_vror_tu (vuint32m1_t vd, vuint32m1_t vs2, size_t rs1, size_t vl); +vuint32m2_t __riscv_vror_tu (vuint32m2_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl); +vuint32m2_t __riscv_vror_tu (vuint32m2_t vd, vuint32m2_t vs2, size_t rs1, size_t vl); +vuint32m4_t __riscv_vror_tu (vuint32m4_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl); +vuint32m4_t __riscv_vror_tu (vuint32m4_t vd, vuint32m4_t vs2, size_t rs1, size_t vl); +vuint32m8_t __riscv_vror_tu (vuint32m8_t vd, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl); +vuint32m8_t __riscv_vror_tu (vuint32m8_t vd, vuint32m8_t vs2, size_t rs1, size_t vl); +vuint64m1_t __riscv_vror_tu (vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl); +vuint64m1_t __riscv_vror_tu (vuint64m1_t vd, vuint64m1_t vs2, size_t rs1, size_t vl); +vuint64m2_t __riscv_vror_tu (vuint64m2_t vd, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl); +vuint64m2_t __riscv_vror_tu (vuint64m2_t vd, vuint64m2_t vs2, size_t rs1, size_t vl); +vuint64m4_t __riscv_vror_tu (vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl); +vuint64m4_t __riscv_vror_tu (vuint64m4_t vd, vuint64m4_t vs2, size_t rs1, size_t vl); +vuint64m8_t __riscv_vror_tu (vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl); +vuint64m8_t __riscv_vror_tu (vuint64m8_t vd, 
vuint64m8_t vs2, size_t rs1, size_t vl); +// masked functions +vuint8mf8_t __riscv_vrol_tum (vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl); +vuint8mf8_t __riscv_vrol_tum (vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, size_t rs1, size_t vl); +vuint8mf4_t __riscv_vrol_tum (vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl); +vuint8mf4_t __riscv_vrol_tum (vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, size_t rs1, size_t vl); +vuint8mf2_t __riscv_vrol_tum (vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl); +vuint8mf2_t __riscv_vrol_tum (vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, size_t rs1, size_t vl); +vuint8m1_t __riscv_vrol_tum (vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl); +vuint8m1_t __riscv_vrol_tum (vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, size_t rs1, size_t vl); +vuint8m2_t __riscv_vrol_tum (vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl); +vuint8m2_t __riscv_vrol_tum (vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, size_t rs1, size_t vl); +vuint8m4_t __riscv_vrol_tum (vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl); +vuint8m4_t __riscv_vrol_tum (vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, size_t rs1, size_t vl); +vuint8m8_t __riscv_vrol_tum (vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl); +vuint8m8_t __riscv_vrol_tum (vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, size_t rs1, size_t vl); +vuint16mf4_t __riscv_vrol_tum (vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl); +vuint16mf4_t __riscv_vrol_tum (vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, size_t rs1, size_t vl); +vuint16mf2_t __riscv_vrol_tum (vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl); +vuint16mf2_t __riscv_vrol_tum (vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, size_t rs1, size_t vl); +vuint16m1_t __riscv_vrol_tum (vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl); +vuint16m1_t __riscv_vrol_tum (vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, size_t rs1, size_t vl); +vuint16m2_t __riscv_vrol_tum (vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl); +vuint16m2_t __riscv_vrol_tum (vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, size_t rs1, size_t vl); +vuint16m4_t __riscv_vrol_tum (vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl); +vuint16m4_t __riscv_vrol_tum (vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, size_t rs1, size_t vl); +vuint16m8_t __riscv_vrol_tum (vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl); +vuint16m8_t __riscv_vrol_tum (vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, size_t rs1, size_t vl); +vuint32mf2_t __riscv_vrol_tum (vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl); +vuint32mf2_t __riscv_vrol_tum (vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, size_t rs1, size_t vl); +vuint32m1_t __riscv_vrol_tum (vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl); +vuint32m1_t __riscv_vrol_tum (vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, size_t rs1, size_t vl); +vuint32m2_t __riscv_vrol_tum (vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl); +vuint32m2_t __riscv_vrol_tum (vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, size_t rs1, size_t vl); +vuint32m4_t __riscv_vrol_tum (vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl); +vuint32m4_t __riscv_vrol_tum (vbool8_t vm, vuint32m4_t vd, 
vuint32m4_t vs2, size_t rs1, size_t vl); +vuint32m8_t __riscv_vrol_tum (vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl); +vuint32m8_t __riscv_vrol_tum (vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, size_t rs1, size_t vl); +vuint64m1_t __riscv_vrol_tum (vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl); +vuint64m1_t __riscv_vrol_tum (vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, size_t rs1, size_t vl); +vuint64m2_t __riscv_vrol_tum (vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl); +vuint64m2_t __riscv_vrol_tum (vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, size_t rs1, size_t vl); +vuint64m4_t __riscv_vrol_tum (vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl); +vuint64m4_t __riscv_vrol_tum (vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, size_t rs1, size_t vl); +vuint64m8_t __riscv_vrol_tum (vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl); +vuint64m8_t __riscv_vrol_tum (vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, size_t rs1, size_t vl); +vuint8mf8_t __riscv_vror_tum (vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl); +vuint8mf8_t __riscv_vror_tum (vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, size_t rs1, size_t vl); +vuint8mf4_t __riscv_vror_tum (vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl); +vuint8mf4_t __riscv_vror_tum (vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, size_t rs1, size_t vl); +vuint8mf2_t __riscv_vror_tum (vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl); +vuint8mf2_t __riscv_vror_tum (vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, size_t rs1, size_t vl); +vuint8m1_t __riscv_vror_tum (vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl); +vuint8m1_t __riscv_vror_tum (vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, size_t rs1, size_t vl); +vuint8m2_t __riscv_vror_tum (vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl); +vuint8m2_t __riscv_vror_tum (vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, size_t rs1, size_t vl); +vuint8m4_t __riscv_vror_tum (vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl); +vuint8m4_t __riscv_vror_tum (vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, size_t rs1, size_t vl); +vuint8m8_t __riscv_vror_tum (vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl); +vuint8m8_t __riscv_vror_tum (vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, size_t rs1, size_t vl); +vuint16mf4_t __riscv_vror_tum (vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl); +vuint16mf4_t __riscv_vror_tum (vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, size_t rs1, size_t vl); +vuint16mf2_t __riscv_vror_tum (vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl); +vuint16mf2_t __riscv_vror_tum (vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, size_t rs1, size_t vl); +vuint16m1_t __riscv_vror_tum (vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl); +vuint16m1_t __riscv_vror_tum (vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, size_t rs1, size_t vl); +vuint16m2_t __riscv_vror_tum (vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl); +vuint16m2_t __riscv_vror_tum (vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, size_t rs1, size_t vl); +vuint16m4_t __riscv_vror_tum (vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl); +vuint16m4_t __riscv_vror_tum (vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, size_t rs1, 
size_t vl); +vuint16m8_t __riscv_vror_tum (vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl); +vuint16m8_t __riscv_vror_tum (vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, size_t rs1, size_t vl); +vuint32mf2_t __riscv_vror_tum (vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl); +vuint32mf2_t __riscv_vror_tum (vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, size_t rs1, size_t vl); +vuint32m1_t __riscv_vror_tum (vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl); +vuint32m1_t __riscv_vror_tum (vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, size_t rs1, size_t vl); +vuint32m2_t __riscv_vror_tum (vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl); +vuint32m2_t __riscv_vror_tum (vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, size_t rs1, size_t vl); +vuint32m4_t __riscv_vror_tum (vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl); +vuint32m4_t __riscv_vror_tum (vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, size_t rs1, size_t vl); +vuint32m8_t __riscv_vror_tum (vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl); +vuint32m8_t __riscv_vror_tum (vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, size_t rs1, size_t vl); +vuint64m1_t __riscv_vror_tum (vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl); +vuint64m1_t __riscv_vror_tum (vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, size_t rs1, size_t vl); +vuint64m2_t __riscv_vror_tum (vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl); +vuint64m2_t __riscv_vror_tum (vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, size_t rs1, size_t vl); +vuint64m4_t __riscv_vror_tum (vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl); +vuint64m4_t __riscv_vror_tum (vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, size_t rs1, size_t vl); +vuint64m8_t __riscv_vror_tum (vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl); +vuint64m8_t __riscv_vror_tum (vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, size_t rs1, size_t vl); +// masked functions +vuint8mf8_t __riscv_vrol_tumu (vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl); +vuint8mf8_t __riscv_vrol_tumu (vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, size_t rs1, size_t vl); +vuint8mf4_t __riscv_vrol_tumu (vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl); +vuint8mf4_t __riscv_vrol_tumu (vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, size_t rs1, size_t vl); +vuint8mf2_t __riscv_vrol_tumu (vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl); +vuint8mf2_t __riscv_vrol_tumu (vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, size_t rs1, size_t vl); +vuint8m1_t __riscv_vrol_tumu (vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl); +vuint8m1_t __riscv_vrol_tumu (vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, size_t rs1, size_t vl); +vuint8m2_t __riscv_vrol_tumu (vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl); +vuint8m2_t __riscv_vrol_tumu (vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, size_t rs1, size_t vl); +vuint8m4_t __riscv_vrol_tumu (vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl); +vuint8m4_t __riscv_vrol_tumu (vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, size_t rs1, size_t vl); +vuint8m8_t __riscv_vrol_tumu (vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl); +vuint8m8_t __riscv_vrol_tumu (vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, size_t rs1, 
size_t vl); +vuint16mf4_t __riscv_vrol_tumu (vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl); +vuint16mf4_t __riscv_vrol_tumu (vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, size_t rs1, size_t vl); +vuint16mf2_t __riscv_vrol_tumu (vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl); +vuint16mf2_t __riscv_vrol_tumu (vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, size_t rs1, size_t vl); +vuint16m1_t __riscv_vrol_tumu (vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl); +vuint16m1_t __riscv_vrol_tumu (vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, size_t rs1, size_t vl); +vuint16m2_t __riscv_vrol_tumu (vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl); +vuint16m2_t __riscv_vrol_tumu (vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, size_t rs1, size_t vl); +vuint16m4_t __riscv_vrol_tumu (vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl); +vuint16m4_t __riscv_vrol_tumu (vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, size_t rs1, size_t vl); +vuint16m8_t __riscv_vrol_tumu (vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl); +vuint16m8_t __riscv_vrol_tumu (vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, size_t rs1, size_t vl); +vuint32mf2_t __riscv_vrol_tumu (vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl); +vuint32mf2_t __riscv_vrol_tumu (vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, size_t rs1, size_t vl); +vuint32m1_t __riscv_vrol_tumu (vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl); +vuint32m1_t __riscv_vrol_tumu (vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, size_t rs1, size_t vl); +vuint32m2_t __riscv_vrol_tumu (vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl); +vuint32m2_t __riscv_vrol_tumu (vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, size_t rs1, size_t vl); +vuint32m4_t __riscv_vrol_tumu (vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl); +vuint32m4_t __riscv_vrol_tumu (vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, size_t rs1, size_t vl); +vuint32m8_t __riscv_vrol_tumu (vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl); +vuint32m8_t __riscv_vrol_tumu (vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, size_t rs1, size_t vl); +vuint64m1_t __riscv_vrol_tumu (vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl); +vuint64m1_t __riscv_vrol_tumu (vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, size_t rs1, size_t vl); +vuint64m2_t __riscv_vrol_tumu (vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl); +vuint64m2_t __riscv_vrol_tumu (vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, size_t rs1, size_t vl); +vuint64m4_t __riscv_vrol_tumu (vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl); +vuint64m4_t __riscv_vrol_tumu (vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, size_t rs1, size_t vl); +vuint64m8_t __riscv_vrol_tumu (vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl); +vuint64m8_t __riscv_vrol_tumu (vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, size_t rs1, size_t vl); +vuint8mf8_t __riscv_vror_tumu (vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl); +vuint8mf8_t __riscv_vror_tumu (vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, size_t rs1, size_t vl); +vuint8mf4_t __riscv_vror_tumu (vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl); +vuint8mf4_t __riscv_vror_tumu (vbool32_t vm, 
vuint8mf4_t vd, vuint8mf4_t vs2, size_t rs1, size_t vl); +vuint8mf2_t __riscv_vror_tumu (vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl); +vuint8mf2_t __riscv_vror_tumu (vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, size_t rs1, size_t vl); +vuint8m1_t __riscv_vror_tumu (vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl); +vuint8m1_t __riscv_vror_tumu (vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, size_t rs1, size_t vl); +vuint8m2_t __riscv_vror_tumu (vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl); +vuint8m2_t __riscv_vror_tumu (vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, size_t rs1, size_t vl); +vuint8m4_t __riscv_vror_tumu (vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl); +vuint8m4_t __riscv_vror_tumu (vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, size_t rs1, size_t vl); +vuint8m8_t __riscv_vror_tumu (vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl); +vuint8m8_t __riscv_vror_tumu (vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, size_t rs1, size_t vl); +vuint16mf4_t __riscv_vror_tumu (vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl); +vuint16mf4_t __riscv_vror_tumu (vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, size_t rs1, size_t vl); +vuint16mf2_t __riscv_vror_tumu (vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl); +vuint16mf2_t __riscv_vror_tumu (vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, size_t rs1, size_t vl); +vuint16m1_t __riscv_vror_tumu (vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl); +vuint16m1_t __riscv_vror_tumu (vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, size_t rs1, size_t vl); +vuint16m2_t __riscv_vror_tumu (vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl); +vuint16m2_t __riscv_vror_tumu (vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, size_t rs1, size_t vl); +vuint16m4_t __riscv_vror_tumu (vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl); +vuint16m4_t __riscv_vror_tumu (vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, size_t rs1, size_t vl); +vuint16m8_t __riscv_vror_tumu (vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl); +vuint16m8_t __riscv_vror_tumu (vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, size_t rs1, size_t vl); +vuint32mf2_t __riscv_vror_tumu (vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl); +vuint32mf2_t __riscv_vror_tumu (vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, size_t rs1, size_t vl); +vuint32m1_t __riscv_vror_tumu (vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl); +vuint32m1_t __riscv_vror_tumu (vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, size_t rs1, size_t vl); +vuint32m2_t __riscv_vror_tumu (vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl); +vuint32m2_t __riscv_vror_tumu (vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, size_t rs1, size_t vl); +vuint32m4_t __riscv_vror_tumu (vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl); +vuint32m4_t __riscv_vror_tumu (vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, size_t rs1, size_t vl); +vuint32m8_t __riscv_vror_tumu (vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl); +vuint32m8_t __riscv_vror_tumu (vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, size_t rs1, size_t vl); +vuint64m1_t __riscv_vror_tumu (vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl); +vuint64m1_t __riscv_vror_tumu 
(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, size_t rs1, size_t vl); +vuint64m2_t __riscv_vror_tumu (vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl); +vuint64m2_t __riscv_vror_tumu (vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, size_t rs1, size_t vl); +vuint64m4_t __riscv_vror_tumu (vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl); +vuint64m4_t __riscv_vror_tumu (vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, size_t rs1, size_t vl); +vuint64m8_t __riscv_vror_tumu (vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl); +vuint64m8_t __riscv_vror_tumu (vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, size_t rs1, size_t vl); +// masked functions +vuint8mf8_t __riscv_vrol_mu (vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl); +vuint8mf8_t __riscv_vrol_mu (vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, size_t rs1, size_t vl); +vuint8mf4_t __riscv_vrol_mu (vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl); +vuint8mf4_t __riscv_vrol_mu (vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, size_t rs1, size_t vl); +vuint8mf2_t __riscv_vrol_mu (vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl); +vuint8mf2_t __riscv_vrol_mu (vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, size_t rs1, size_t vl); +vuint8m1_t __riscv_vrol_mu (vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl); +vuint8m1_t __riscv_vrol_mu (vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, size_t rs1, size_t vl); +vuint8m2_t __riscv_vrol_mu (vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl); +vuint8m2_t __riscv_vrol_mu (vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, size_t rs1, size_t vl); +vuint8m4_t __riscv_vrol_mu (vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl); +vuint8m4_t __riscv_vrol_mu (vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, size_t rs1, size_t vl); +vuint8m8_t __riscv_vrol_mu (vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl); +vuint8m8_t __riscv_vrol_mu (vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, size_t rs1, size_t vl); +vuint16mf4_t __riscv_vrol_mu (vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl); +vuint16mf4_t __riscv_vrol_mu (vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, size_t rs1, size_t vl); +vuint16mf2_t __riscv_vrol_mu (vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl); +vuint16mf2_t __riscv_vrol_mu (vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, size_t rs1, size_t vl); +vuint16m1_t __riscv_vrol_mu (vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl); +vuint16m1_t __riscv_vrol_mu (vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, size_t rs1, size_t vl); +vuint16m2_t __riscv_vrol_mu (vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl); +vuint16m2_t __riscv_vrol_mu (vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, size_t rs1, size_t vl); +vuint16m4_t __riscv_vrol_mu (vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl); +vuint16m4_t __riscv_vrol_mu (vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, size_t rs1, size_t vl); +vuint16m8_t __riscv_vrol_mu (vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl); +vuint16m8_t __riscv_vrol_mu (vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, size_t rs1, size_t vl); +vuint32mf2_t __riscv_vrol_mu (vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl); +vuint32mf2_t __riscv_vrol_mu (vbool64_t vm, vuint32mf2_t 
vd, vuint32mf2_t vs2, size_t rs1, size_t vl); +vuint32m1_t __riscv_vrol_mu (vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl); +vuint32m1_t __riscv_vrol_mu (vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, size_t rs1, size_t vl); +vuint32m2_t __riscv_vrol_mu (vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl); +vuint32m2_t __riscv_vrol_mu (vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, size_t rs1, size_t vl); +vuint32m4_t __riscv_vrol_mu (vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl); +vuint32m4_t __riscv_vrol_mu (vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, size_t rs1, size_t vl); +vuint32m8_t __riscv_vrol_mu (vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl); +vuint32m8_t __riscv_vrol_mu (vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, size_t rs1, size_t vl); +vuint64m1_t __riscv_vrol_mu (vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl); +vuint64m1_t __riscv_vrol_mu (vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, size_t rs1, size_t vl); +vuint64m2_t __riscv_vrol_mu (vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl); +vuint64m2_t __riscv_vrol_mu (vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, size_t rs1, size_t vl); +vuint64m4_t __riscv_vrol_mu (vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl); +vuint64m4_t __riscv_vrol_mu (vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, size_t rs1, size_t vl); +vuint64m8_t __riscv_vrol_mu (vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl); +vuint64m8_t __riscv_vrol_mu (vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, size_t rs1, size_t vl); +vuint8mf8_t __riscv_vror_mu (vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl); +vuint8mf8_t __riscv_vror_mu (vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, size_t rs1, size_t vl); +vuint8mf4_t __riscv_vror_mu (vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl); +vuint8mf4_t __riscv_vror_mu (vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, size_t rs1, size_t vl); +vuint8mf2_t __riscv_vror_mu (vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl); +vuint8mf2_t __riscv_vror_mu (vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, size_t rs1, size_t vl); +vuint8m1_t __riscv_vror_mu (vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl); +vuint8m1_t __riscv_vror_mu (vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, size_t rs1, size_t vl); +vuint8m2_t __riscv_vror_mu (vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl); +vuint8m2_t __riscv_vror_mu (vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, size_t rs1, size_t vl); +vuint8m4_t __riscv_vror_mu (vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl); +vuint8m4_t __riscv_vror_mu (vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, size_t rs1, size_t vl); +vuint8m8_t __riscv_vror_mu (vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl); +vuint8m8_t __riscv_vror_mu (vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, size_t rs1, size_t vl); +vuint16mf4_t __riscv_vror_mu (vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl); +vuint16mf4_t __riscv_vror_mu (vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, size_t rs1, size_t vl); +vuint16mf2_t __riscv_vror_mu (vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl); +vuint16mf2_t __riscv_vror_mu (vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, size_t rs1, size_t vl); +vuint16m1_t 
__riscv_vror_mu (vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl); +vuint16m1_t __riscv_vror_mu (vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, size_t rs1, size_t vl); +vuint16m2_t __riscv_vror_mu (vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl); +vuint16m2_t __riscv_vror_mu (vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, size_t rs1, size_t vl); +vuint16m4_t __riscv_vror_mu (vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl); +vuint16m4_t __riscv_vror_mu (vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, size_t rs1, size_t vl); +vuint16m8_t __riscv_vror_mu (vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl); +vuint16m8_t __riscv_vror_mu (vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, size_t rs1, size_t vl); +vuint32mf2_t __riscv_vror_mu (vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl); +vuint32mf2_t __riscv_vror_mu (vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, size_t rs1, size_t vl); +vuint32m1_t __riscv_vror_mu (vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl); +vuint32m1_t __riscv_vror_mu (vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, size_t rs1, size_t vl); +vuint32m2_t __riscv_vror_mu (vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl); +vuint32m2_t __riscv_vror_mu (vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, size_t rs1, size_t vl); +vuint32m4_t __riscv_vror_mu (vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl); +vuint32m4_t __riscv_vror_mu (vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, size_t rs1, size_t vl); +vuint32m8_t __riscv_vror_mu (vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl); +vuint32m8_t __riscv_vror_mu (vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, size_t rs1, size_t vl); +vuint64m1_t __riscv_vror_mu (vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl); +vuint64m1_t __riscv_vror_mu (vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, size_t rs1, size_t vl); +vuint64m2_t __riscv_vror_mu (vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl); +vuint64m2_t __riscv_vror_mu (vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, size_t rs1, size_t vl); +vuint64m4_t __riscv_vror_mu (vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl); +vuint64m4_t __riscv_vror_mu (vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, size_t rs1, size_t vl); +vuint64m8_t __riscv_vror_mu (vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl); +vuint64m8_t __riscv_vror_mu (vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, size_t rs1, size_t vl); +---- + +[[policy-variant-overloaded]] +==== Vector Basic Bit-manipulation used - Widening Shift + +[,c] +---- +vuint16mf4_t __riscv_vwsll_tu (vuint16mf4_t vd, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl); +vuint16mf4_t __riscv_vwsll_tu (vuint16mf4_t vd, vuint8mf8_t vs2, size_t rs1, size_t vl); +vuint16mf2_t __riscv_vwsll_tu (vuint16mf2_t vd, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl); +vuint16mf2_t __riscv_vwsll_tu (vuint16mf2_t vd, vuint8mf4_t vs2, size_t rs1, size_t vl); +vuint16m1_t __riscv_vwsll_tu (vuint16m1_t vd, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl); +vuint16m1_t __riscv_vwsll_tu (vuint16m1_t vd, vuint8mf2_t vs2, size_t rs1, size_t vl); +vuint16m2_t __riscv_vwsll_tu (vuint16m2_t vd, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl); +vuint16m2_t __riscv_vwsll_tu (vuint16m2_t vd, vuint8m1_t vs2, size_t rs1, size_t vl); +vuint16m4_t __riscv_vwsll_tu 
(vuint16m4_t vd, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl); +vuint16m4_t __riscv_vwsll_tu (vuint16m4_t vd, vuint8m2_t vs2, size_t rs1, size_t vl); +vuint16m8_t __riscv_vwsll_tu (vuint16m8_t vd, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl); +vuint16m8_t __riscv_vwsll_tu (vuint16m8_t vd, vuint8m4_t vs2, size_t rs1, size_t vl); +vuint32mf2_t __riscv_vwsll_tu (vuint32mf2_t vd, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl); +vuint32mf2_t __riscv_vwsll_tu (vuint32mf2_t vd, vuint16mf4_t vs2, size_t rs1, size_t vl); +vuint32m1_t __riscv_vwsll_tu (vuint32m1_t vd, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl); +vuint32m1_t __riscv_vwsll_tu (vuint32m1_t vd, vuint16mf2_t vs2, size_t rs1, size_t vl); +vuint32m2_t __riscv_vwsll_tu (vuint32m2_t vd, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl); +vuint32m2_t __riscv_vwsll_tu (vuint32m2_t vd, vuint16m1_t vs2, size_t rs1, size_t vl); +vuint32m4_t __riscv_vwsll_tu (vuint32m4_t vd, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl); +vuint32m4_t __riscv_vwsll_tu (vuint32m4_t vd, vuint16m2_t vs2, size_t rs1, size_t vl); +vuint32m8_t __riscv_vwsll_tu (vuint32m8_t vd, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl); +vuint32m8_t __riscv_vwsll_tu (vuint32m8_t vd, vuint16m4_t vs2, size_t rs1, size_t vl); +vuint64m1_t __riscv_vwsll_tu (vuint64m1_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl); +vuint64m1_t __riscv_vwsll_tu (vuint64m1_t vd, vuint32mf2_t vs2, size_t rs1, size_t vl); +vuint64m2_t __riscv_vwsll_tu (vuint64m2_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl); +vuint64m2_t __riscv_vwsll_tu (vuint64m2_t vd, vuint32m1_t vs2, size_t rs1, size_t vl); +vuint64m4_t __riscv_vwsll_tu (vuint64m4_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl); +vuint64m4_t __riscv_vwsll_tu (vuint64m4_t vd, vuint32m2_t vs2, size_t rs1, size_t vl); +vuint64m8_t __riscv_vwsll_tu (vuint64m8_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl); +vuint64m8_t __riscv_vwsll_tu (vuint64m8_t vd, vuint32m4_t vs2, size_t rs1, size_t vl); +// masked functions +vuint16mf4_t __riscv_vwsll_tum (vbool64_t vm, vuint16mf4_t vd, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl); +vuint16mf4_t __riscv_vwsll_tum (vbool64_t vm, vuint16mf4_t vd, vuint8mf8_t vs2, size_t rs1, size_t vl); +vuint16mf2_t __riscv_vwsll_tum (vbool32_t vm, vuint16mf2_t vd, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl); +vuint16mf2_t __riscv_vwsll_tum (vbool32_t vm, vuint16mf2_t vd, vuint8mf4_t vs2, size_t rs1, size_t vl); +vuint16m1_t __riscv_vwsll_tum (vbool16_t vm, vuint16m1_t vd, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl); +vuint16m1_t __riscv_vwsll_tum (vbool16_t vm, vuint16m1_t vd, vuint8mf2_t vs2, size_t rs1, size_t vl); +vuint16m2_t __riscv_vwsll_tum (vbool8_t vm, vuint16m2_t vd, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl); +vuint16m2_t __riscv_vwsll_tum (vbool8_t vm, vuint16m2_t vd, vuint8m1_t vs2, size_t rs1, size_t vl); +vuint16m4_t __riscv_vwsll_tum (vbool4_t vm, vuint16m4_t vd, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl); +vuint16m4_t __riscv_vwsll_tum (vbool4_t vm, vuint16m4_t vd, vuint8m2_t vs2, size_t rs1, size_t vl); +vuint16m8_t __riscv_vwsll_tum (vbool2_t vm, vuint16m8_t vd, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl); +vuint16m8_t __riscv_vwsll_tum (vbool2_t vm, vuint16m8_t vd, vuint8m4_t vs2, size_t rs1, size_t vl); +vuint32mf2_t __riscv_vwsll_tum (vbool64_t vm, vuint32mf2_t vd, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl); +vuint32mf2_t __riscv_vwsll_tum (vbool64_t vm, vuint32mf2_t vd, vuint16mf4_t vs2, size_t rs1, size_t vl); +vuint32m1_t __riscv_vwsll_tum (vbool32_t vm, vuint32m1_t vd, vuint16mf2_t vs2, 
vuint16mf2_t vs1, size_t vl); +vuint32m1_t __riscv_vwsll_tum (vbool32_t vm, vuint32m1_t vd, vuint16mf2_t vs2, size_t rs1, size_t vl); +vuint32m2_t __riscv_vwsll_tum (vbool16_t vm, vuint32m2_t vd, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl); +vuint32m2_t __riscv_vwsll_tum (vbool16_t vm, vuint32m2_t vd, vuint16m1_t vs2, size_t rs1, size_t vl); +vuint32m4_t __riscv_vwsll_tum (vbool8_t vm, vuint32m4_t vd, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl); +vuint32m4_t __riscv_vwsll_tum (vbool8_t vm, vuint32m4_t vd, vuint16m2_t vs2, size_t rs1, size_t vl); +vuint32m8_t __riscv_vwsll_tum (vbool4_t vm, vuint32m8_t vd, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl); +vuint32m8_t __riscv_vwsll_tum (vbool4_t vm, vuint32m8_t vd, vuint16m4_t vs2, size_t rs1, size_t vl); +vuint64m1_t __riscv_vwsll_tum (vbool64_t vm, vuint64m1_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl); +vuint64m1_t __riscv_vwsll_tum (vbool64_t vm, vuint64m1_t vd, vuint32mf2_t vs2, size_t rs1, size_t vl); +vuint64m2_t __riscv_vwsll_tum (vbool32_t vm, vuint64m2_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl); +vuint64m2_t __riscv_vwsll_tum (vbool32_t vm, vuint64m2_t vd, vuint32m1_t vs2, size_t rs1, size_t vl); +vuint64m4_t __riscv_vwsll_tum (vbool16_t vm, vuint64m4_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl); +vuint64m4_t __riscv_vwsll_tum (vbool16_t vm, vuint64m4_t vd, vuint32m2_t vs2, size_t rs1, size_t vl); +vuint64m8_t __riscv_vwsll_tum (vbool8_t vm, vuint64m8_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl); +vuint64m8_t __riscv_vwsll_tum (vbool8_t vm, vuint64m8_t vd, vuint32m4_t vs2, size_t rs1, size_t vl); +// masked functions +vuint16mf4_t __riscv_vwsll_tumu (vbool64_t vm, vuint16mf4_t vd, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl); +vuint16mf4_t __riscv_vwsll_tumu (vbool64_t vm, vuint16mf4_t vd, vuint8mf8_t vs2, size_t rs1, size_t vl); +vuint16mf2_t __riscv_vwsll_tumu (vbool32_t vm, vuint16mf2_t vd, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl); +vuint16mf2_t __riscv_vwsll_tumu (vbool32_t vm, vuint16mf2_t vd, vuint8mf4_t vs2, size_t rs1, size_t vl); +vuint16m1_t __riscv_vwsll_tumu (vbool16_t vm, vuint16m1_t vd, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl); +vuint16m1_t __riscv_vwsll_tumu (vbool16_t vm, vuint16m1_t vd, vuint8mf2_t vs2, size_t rs1, size_t vl); +vuint16m2_t __riscv_vwsll_tumu (vbool8_t vm, vuint16m2_t vd, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl); +vuint16m2_t __riscv_vwsll_tumu (vbool8_t vm, vuint16m2_t vd, vuint8m1_t vs2, size_t rs1, size_t vl); +vuint16m4_t __riscv_vwsll_tumu (vbool4_t vm, vuint16m4_t vd, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl); +vuint16m4_t __riscv_vwsll_tumu (vbool4_t vm, vuint16m4_t vd, vuint8m2_t vs2, size_t rs1, size_t vl); +vuint16m8_t __riscv_vwsll_tumu (vbool2_t vm, vuint16m8_t vd, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl); +vuint16m8_t __riscv_vwsll_tumu (vbool2_t vm, vuint16m8_t vd, vuint8m4_t vs2, size_t rs1, size_t vl); +vuint32mf2_t __riscv_vwsll_tumu (vbool64_t vm, vuint32mf2_t vd, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl); +vuint32mf2_t __riscv_vwsll_tumu (vbool64_t vm, vuint32mf2_t vd, vuint16mf4_t vs2, size_t rs1, size_t vl); +vuint32m1_t __riscv_vwsll_tumu (vbool32_t vm, vuint32m1_t vd, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl); +vuint32m1_t __riscv_vwsll_tumu (vbool32_t vm, vuint32m1_t vd, vuint16mf2_t vs2, size_t rs1, size_t vl); +vuint32m2_t __riscv_vwsll_tumu (vbool16_t vm, vuint32m2_t vd, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl); +vuint32m2_t __riscv_vwsll_tumu (vbool16_t vm, vuint32m2_t vd, vuint16m1_t vs2, size_t rs1, size_t vl); 
+vuint32m4_t __riscv_vwsll_tumu (vbool8_t vm, vuint32m4_t vd, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl); +vuint32m4_t __riscv_vwsll_tumu (vbool8_t vm, vuint32m4_t vd, vuint16m2_t vs2, size_t rs1, size_t vl); +vuint32m8_t __riscv_vwsll_tumu (vbool4_t vm, vuint32m8_t vd, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl); +vuint32m8_t __riscv_vwsll_tumu (vbool4_t vm, vuint32m8_t vd, vuint16m4_t vs2, size_t rs1, size_t vl); +vuint64m1_t __riscv_vwsll_tumu (vbool64_t vm, vuint64m1_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl); +vuint64m1_t __riscv_vwsll_tumu (vbool64_t vm, vuint64m1_t vd, vuint32mf2_t vs2, size_t rs1, size_t vl); +vuint64m2_t __riscv_vwsll_tumu (vbool32_t vm, vuint64m2_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl); +vuint64m2_t __riscv_vwsll_tumu (vbool32_t vm, vuint64m2_t vd, vuint32m1_t vs2, size_t rs1, size_t vl); +vuint64m4_t __riscv_vwsll_tumu (vbool16_t vm, vuint64m4_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl); +vuint64m4_t __riscv_vwsll_tumu (vbool16_t vm, vuint64m4_t vd, vuint32m2_t vs2, size_t rs1, size_t vl); +vuint64m8_t __riscv_vwsll_tumu (vbool8_t vm, vuint64m8_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl); +vuint64m8_t __riscv_vwsll_tumu (vbool8_t vm, vuint64m8_t vd, vuint32m4_t vs2, size_t rs1, size_t vl); +// masked functions +vuint16mf4_t __riscv_vwsll_mu (vbool64_t vm, vuint16mf4_t vd, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl); +vuint16mf4_t __riscv_vwsll_mu (vbool64_t vm, vuint16mf4_t vd, vuint8mf8_t vs2, size_t rs1, size_t vl); +vuint16mf2_t __riscv_vwsll_mu (vbool32_t vm, vuint16mf2_t vd, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl); +vuint16mf2_t __riscv_vwsll_mu (vbool32_t vm, vuint16mf2_t vd, vuint8mf4_t vs2, size_t rs1, size_t vl); +vuint16m1_t __riscv_vwsll_mu (vbool16_t vm, vuint16m1_t vd, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl); +vuint16m1_t __riscv_vwsll_mu (vbool16_t vm, vuint16m1_t vd, vuint8mf2_t vs2, size_t rs1, size_t vl); +vuint16m2_t __riscv_vwsll_mu (vbool8_t vm, vuint16m2_t vd, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl); +vuint16m2_t __riscv_vwsll_mu (vbool8_t vm, vuint16m2_t vd, vuint8m1_t vs2, size_t rs1, size_t vl); +vuint16m4_t __riscv_vwsll_mu (vbool4_t vm, vuint16m4_t vd, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl); +vuint16m4_t __riscv_vwsll_mu (vbool4_t vm, vuint16m4_t vd, vuint8m2_t vs2, size_t rs1, size_t vl); +vuint16m8_t __riscv_vwsll_mu (vbool2_t vm, vuint16m8_t vd, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl); +vuint16m8_t __riscv_vwsll_mu (vbool2_t vm, vuint16m8_t vd, vuint8m4_t vs2, size_t rs1, size_t vl); +vuint32mf2_t __riscv_vwsll_mu (vbool64_t vm, vuint32mf2_t vd, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl); +vuint32mf2_t __riscv_vwsll_mu (vbool64_t vm, vuint32mf2_t vd, vuint16mf4_t vs2, size_t rs1, size_t vl); +vuint32m1_t __riscv_vwsll_mu (vbool32_t vm, vuint32m1_t vd, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl); +vuint32m1_t __riscv_vwsll_mu (vbool32_t vm, vuint32m1_t vd, vuint16mf2_t vs2, size_t rs1, size_t vl); +vuint32m2_t __riscv_vwsll_mu (vbool16_t vm, vuint32m2_t vd, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl); +vuint32m2_t __riscv_vwsll_mu (vbool16_t vm, vuint32m2_t vd, vuint16m1_t vs2, size_t rs1, size_t vl); +vuint32m4_t __riscv_vwsll_mu (vbool8_t vm, vuint32m4_t vd, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl); +vuint32m4_t __riscv_vwsll_mu (vbool8_t vm, vuint32m4_t vd, vuint16m2_t vs2, size_t rs1, size_t vl); +vuint32m8_t __riscv_vwsll_mu (vbool4_t vm, vuint32m8_t vd, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl); +vuint32m8_t __riscv_vwsll_mu (vbool4_t vm, vuint32m8_t vd, 
vuint16m4_t vs2, size_t rs1, size_t vl); +vuint64m1_t __riscv_vwsll_mu (vbool64_t vm, vuint64m1_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl); +vuint64m1_t __riscv_vwsll_mu (vbool64_t vm, vuint64m1_t vd, vuint32mf2_t vs2, size_t rs1, size_t vl); +vuint64m2_t __riscv_vwsll_mu (vbool32_t vm, vuint64m2_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl); +vuint64m2_t __riscv_vwsll_mu (vbool32_t vm, vuint64m2_t vd, vuint32m1_t vs2, size_t rs1, size_t vl); +vuint64m4_t __riscv_vwsll_mu (vbool16_t vm, vuint64m4_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl); +vuint64m4_t __riscv_vwsll_mu (vbool16_t vm, vuint64m4_t vd, vuint32m2_t vs2, size_t rs1, size_t vl); +vuint64m8_t __riscv_vwsll_mu (vbool8_t vm, vuint64m8_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl); +vuint64m8_t __riscv_vwsll_mu (vbool8_t vm, vuint64m8_t vd, vuint32m4_t vs2, size_t rs1, size_t vl); +---- diff --git a/auto-generated/vector-crypto/policy_funcs/overloaded_intrinsic_funcs/01_zvbc_-_vector_carryless_multiplication.adoc b/auto-generated/vector-crypto/policy_funcs/overloaded_intrinsic_funcs/01_zvbc_-_vector_carryless_multiplication.adoc new file mode 100644 index 000000000..98ab2a820 --- /dev/null +++ b/auto-generated/vector-crypto/policy_funcs/overloaded_intrinsic_funcs/01_zvbc_-_vector_carryless_multiplication.adoc @@ -0,0 +1,76 @@ + +=== Zvbc - Vector Carryless Multiplication + +[[policy-variant-overloaded]] +==== Vector Carryless Multiplication + +[,c] +---- +vuint64m1_t __riscv_vclmul_tu (vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl); +vuint64m1_t __riscv_vclmul_tu (vuint64m1_t vd, vuint64m1_t vs2, uint64_t rs1, size_t vl); +vuint64m2_t __riscv_vclmul_tu (vuint64m2_t vd, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl); +vuint64m2_t __riscv_vclmul_tu (vuint64m2_t vd, vuint64m2_t vs2, uint64_t rs1, size_t vl); +vuint64m4_t __riscv_vclmul_tu (vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl); +vuint64m4_t __riscv_vclmul_tu (vuint64m4_t vd, vuint64m4_t vs2, uint64_t rs1, size_t vl); +vuint64m8_t __riscv_vclmul_tu (vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl); +vuint64m8_t __riscv_vclmul_tu (vuint64m8_t vd, vuint64m8_t vs2, uint64_t rs1, size_t vl); +vuint64m1_t __riscv_vclmulh_tu (vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl); +vuint64m1_t __riscv_vclmulh_tu (vuint64m1_t vd, vuint64m1_t vs2, uint64_t rs1, size_t vl); +vuint64m2_t __riscv_vclmulh_tu (vuint64m2_t vd, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl); +vuint64m2_t __riscv_vclmulh_tu (vuint64m2_t vd, vuint64m2_t vs2, uint64_t rs1, size_t vl); +vuint64m4_t __riscv_vclmulh_tu (vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl); +vuint64m4_t __riscv_vclmulh_tu (vuint64m4_t vd, vuint64m4_t vs2, uint64_t rs1, size_t vl); +vuint64m8_t __riscv_vclmulh_tu (vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl); +vuint64m8_t __riscv_vclmulh_tu (vuint64m8_t vd, vuint64m8_t vs2, uint64_t rs1, size_t vl); +// masked functions +vuint64m1_t __riscv_vclmul_tum (vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl); +vuint64m1_t __riscv_vclmul_tum (vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, uint64_t rs1, size_t vl); +vuint64m2_t __riscv_vclmul_tum (vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl); +vuint64m2_t __riscv_vclmul_tum (vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, uint64_t rs1, size_t vl); +vuint64m4_t __riscv_vclmul_tum (vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl); +vuint64m4_t __riscv_vclmul_tum (vbool16_t 
vm, vuint64m4_t vd, vuint64m4_t vs2, uint64_t rs1, size_t vl); +vuint64m8_t __riscv_vclmul_tum (vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl); +vuint64m8_t __riscv_vclmul_tum (vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, uint64_t rs1, size_t vl); +vuint64m1_t __riscv_vclmulh_tum (vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl); +vuint64m1_t __riscv_vclmulh_tum (vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, uint64_t rs1, size_t vl); +vuint64m2_t __riscv_vclmulh_tum (vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl); +vuint64m2_t __riscv_vclmulh_tum (vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, uint64_t rs1, size_t vl); +vuint64m4_t __riscv_vclmulh_tum (vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl); +vuint64m4_t __riscv_vclmulh_tum (vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, uint64_t rs1, size_t vl); +vuint64m8_t __riscv_vclmulh_tum (vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl); +vuint64m8_t __riscv_vclmulh_tum (vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, uint64_t rs1, size_t vl); +// masked functions +vuint64m1_t __riscv_vclmul_tumu (vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl); +vuint64m1_t __riscv_vclmul_tumu (vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, uint64_t rs1, size_t vl); +vuint64m2_t __riscv_vclmul_tumu (vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl); +vuint64m2_t __riscv_vclmul_tumu (vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, uint64_t rs1, size_t vl); +vuint64m4_t __riscv_vclmul_tumu (vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl); +vuint64m4_t __riscv_vclmul_tumu (vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, uint64_t rs1, size_t vl); +vuint64m8_t __riscv_vclmul_tumu (vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl); +vuint64m8_t __riscv_vclmul_tumu (vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, uint64_t rs1, size_t vl); +vuint64m1_t __riscv_vclmulh_tumu (vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl); +vuint64m1_t __riscv_vclmulh_tumu (vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, uint64_t rs1, size_t vl); +vuint64m2_t __riscv_vclmulh_tumu (vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl); +vuint64m2_t __riscv_vclmulh_tumu (vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, uint64_t rs1, size_t vl); +vuint64m4_t __riscv_vclmulh_tumu (vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl); +vuint64m4_t __riscv_vclmulh_tumu (vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, uint64_t rs1, size_t vl); +vuint64m8_t __riscv_vclmulh_tumu (vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl); +vuint64m8_t __riscv_vclmulh_tumu (vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, uint64_t rs1, size_t vl); +// masked functions +vuint64m1_t __riscv_vclmul_mu (vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl); +vuint64m1_t __riscv_vclmul_mu (vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, uint64_t rs1, size_t vl); +vuint64m2_t __riscv_vclmul_mu (vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl); +vuint64m2_t __riscv_vclmul_mu (vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, uint64_t rs1, size_t vl); +vuint64m4_t __riscv_vclmul_mu (vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl); +vuint64m4_t __riscv_vclmul_mu (vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, uint64_t rs1, 
size_t vl); +vuint64m8_t __riscv_vclmul_mu (vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl); +vuint64m8_t __riscv_vclmul_mu (vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, uint64_t rs1, size_t vl); +vuint64m1_t __riscv_vclmulh_mu (vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl); +vuint64m1_t __riscv_vclmulh_mu (vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, uint64_t rs1, size_t vl); +vuint64m2_t __riscv_vclmulh_mu (vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl); +vuint64m2_t __riscv_vclmulh_mu (vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, uint64_t rs1, size_t vl); +vuint64m4_t __riscv_vclmulh_mu (vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl); +vuint64m4_t __riscv_vclmulh_mu (vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, uint64_t rs1, size_t vl); +vuint64m8_t __riscv_vclmulh_mu (vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl); +vuint64m8_t __riscv_vclmulh_mu (vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, uint64_t rs1, size_t vl); +---- diff --git a/auto-generated/vector-crypto/policy_funcs/overloaded_intrinsic_funcs/02_zvkg_-_vector_gcm_gmac.adoc b/auto-generated/vector-crypto/policy_funcs/overloaded_intrinsic_funcs/02_zvkg_-_vector_gcm_gmac.adoc new file mode 100644 index 000000000..36e253baf --- /dev/null +++ b/auto-generated/vector-crypto/policy_funcs/overloaded_intrinsic_funcs/02_zvkg_-_vector_gcm_gmac.adoc @@ -0,0 +1,19 @@ + +=== Zvkg - Vector GCM/GMAC + +[[policy-variant-overloaded]] +==== Vector GCM/GMAC + +[,c] +---- +vuint32mf2_t __riscv_vghsh_tu (vuint32mf2_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl); +vuint32m1_t __riscv_vghsh_tu (vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl); +vuint32m2_t __riscv_vghsh_tu (vuint32m2_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl); +vuint32m4_t __riscv_vghsh_tu (vuint32m4_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl); +vuint32m8_t __riscv_vghsh_tu (vuint32m8_t vd, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl); +vuint32mf2_t __riscv_vgmul_tu (vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vgmul_tu (vuint32m1_t vd, vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vgmul_tu (vuint32m2_t vd, vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vgmul_tu (vuint32m4_t vd, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vgmul_tu (vuint32m8_t vd, vuint32m8_t vs2, size_t vl); +---- diff --git a/auto-generated/vector-crypto/policy_funcs/overloaded_intrinsic_funcs/03_zvkned_-_nist_suite:_vector_aes_block_cipher.adoc b/auto-generated/vector-crypto/policy_funcs/overloaded_intrinsic_funcs/03_zvkned_-_nist_suite:_vector_aes_block_cipher.adoc new file mode 100644 index 000000000..46b66b36f --- /dev/null +++ b/auto-generated/vector-crypto/policy_funcs/overloaded_intrinsic_funcs/03_zvkned_-_nist_suite:_vector_aes_block_cipher.adoc @@ -0,0 +1,130 @@ + +=== Zvkned - NIST Suite: Vector AES Block Cipher + +[[policy-variant-overloaded]] +==== Vector AES Encryption + +[,c] +---- +vuint32mf2_t __riscv_vaesef_vv_tu (vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); +vuint32mf2_t __riscv_vaesef_vs_tu (vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vaesef_vs_tu (vuint32m1_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m2_t __riscv_vaesef_vs_tu (vuint32m2_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m4_t __riscv_vaesef_vs_tu (vuint32m4_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m8_t __riscv_vaesef_vs_tu (vuint32m8_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m1_t 
__riscv_vaesef_vv_tu (vuint32m1_t vd, vuint32m1_t vs2, size_t vl); +vuint32m1_t __riscv_vaesef_vs_tu (vuint32m1_t vd, vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vaesef_vs_tu (vuint32m2_t vd, vuint32m1_t vs2, size_t vl); +vuint32m4_t __riscv_vaesef_vs_tu (vuint32m4_t vd, vuint32m1_t vs2, size_t vl); +vuint32m8_t __riscv_vaesef_vs_tu (vuint32m8_t vd, vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vaesef_vv_tu (vuint32m2_t vd, vuint32m2_t vs2, size_t vl); +vuint32m2_t __riscv_vaesef_vs_tu (vuint32m2_t vd, vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vaesef_vs_tu (vuint32m4_t vd, vuint32m2_t vs2, size_t vl); +vuint32m8_t __riscv_vaesef_vs_tu (vuint32m8_t vd, vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vaesef_vv_tu (vuint32m4_t vd, vuint32m4_t vs2, size_t vl); +vuint32m4_t __riscv_vaesef_vs_tu (vuint32m4_t vd, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vaesef_vs_tu (vuint32m8_t vd, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vaesef_vv_tu (vuint32m8_t vd, vuint32m8_t vs2, size_t vl); +vuint32mf2_t __riscv_vaesem_vv_tu (vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); +vuint32mf2_t __riscv_vaesem_vs_tu (vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vaesem_vs_tu (vuint32m1_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m2_t __riscv_vaesem_vs_tu (vuint32m2_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m4_t __riscv_vaesem_vs_tu (vuint32m4_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m8_t __riscv_vaesem_vs_tu (vuint32m8_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vaesem_vv_tu (vuint32m1_t vd, vuint32m1_t vs2, size_t vl); +vuint32m1_t __riscv_vaesem_vs_tu (vuint32m1_t vd, vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vaesem_vs_tu (vuint32m2_t vd, vuint32m1_t vs2, size_t vl); +vuint32m4_t __riscv_vaesem_vs_tu (vuint32m4_t vd, vuint32m1_t vs2, size_t vl); +vuint32m8_t __riscv_vaesem_vs_tu (vuint32m8_t vd, vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vaesem_vv_tu (vuint32m2_t vd, vuint32m2_t vs2, size_t vl); +vuint32m2_t __riscv_vaesem_vs_tu (vuint32m2_t vd, vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vaesem_vs_tu (vuint32m4_t vd, vuint32m2_t vs2, size_t vl); +vuint32m8_t __riscv_vaesem_vs_tu (vuint32m8_t vd, vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vaesem_vv_tu (vuint32m4_t vd, vuint32m4_t vs2, size_t vl); +vuint32m4_t __riscv_vaesem_vs_tu (vuint32m4_t vd, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vaesem_vs_tu (vuint32m8_t vd, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vaesem_vv_tu (vuint32m8_t vd, vuint32m8_t vs2, size_t vl); +---- + +[[policy-variant-overloaded]] +==== Vector AES Decryption + +[,c] +---- +vuint32mf2_t __riscv_vaesdf_vv_tu (vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); +vuint32mf2_t __riscv_vaesdf_vs_tu (vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vaesdf_vs_tu (vuint32m1_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m2_t __riscv_vaesdf_vs_tu (vuint32m2_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m4_t __riscv_vaesdf_vs_tu (vuint32m4_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m8_t __riscv_vaesdf_vs_tu (vuint32m8_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vaesdf_vv_tu (vuint32m1_t vd, vuint32m1_t vs2, size_t vl); +vuint32m1_t __riscv_vaesdf_vs_tu (vuint32m1_t vd, vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vaesdf_vs_tu (vuint32m2_t vd, vuint32m1_t vs2, size_t vl); +vuint32m4_t __riscv_vaesdf_vs_tu (vuint32m4_t vd, vuint32m1_t vs2, size_t vl); +vuint32m8_t __riscv_vaesdf_vs_tu (vuint32m8_t vd, vuint32m1_t vs2, size_t vl); +vuint32m2_t 
__riscv_vaesdf_vv_tu (vuint32m2_t vd, vuint32m2_t vs2, size_t vl); +vuint32m2_t __riscv_vaesdf_vs_tu (vuint32m2_t vd, vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vaesdf_vs_tu (vuint32m4_t vd, vuint32m2_t vs2, size_t vl); +vuint32m8_t __riscv_vaesdf_vs_tu (vuint32m8_t vd, vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vaesdf_vv_tu (vuint32m4_t vd, vuint32m4_t vs2, size_t vl); +vuint32m4_t __riscv_vaesdf_vs_tu (vuint32m4_t vd, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vaesdf_vs_tu (vuint32m8_t vd, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vaesdf_vv_tu (vuint32m8_t vd, vuint32m8_t vs2, size_t vl); +vuint32mf2_t __riscv_vaesdm_vv_tu (vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); +vuint32mf2_t __riscv_vaesdm_vs_tu (vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vaesdm_vs_tu (vuint32m1_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m2_t __riscv_vaesdm_vs_tu (vuint32m2_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m4_t __riscv_vaesdm_vs_tu (vuint32m4_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m8_t __riscv_vaesdm_vs_tu (vuint32m8_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vaesdm_vv_tu (vuint32m1_t vd, vuint32m1_t vs2, size_t vl); +vuint32m1_t __riscv_vaesdm_vs_tu (vuint32m1_t vd, vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vaesdm_vs_tu (vuint32m2_t vd, vuint32m1_t vs2, size_t vl); +vuint32m4_t __riscv_vaesdm_vs_tu (vuint32m4_t vd, vuint32m1_t vs2, size_t vl); +vuint32m8_t __riscv_vaesdm_vs_tu (vuint32m8_t vd, vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vaesdm_vv_tu (vuint32m2_t vd, vuint32m2_t vs2, size_t vl); +vuint32m2_t __riscv_vaesdm_vs_tu (vuint32m2_t vd, vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vaesdm_vs_tu (vuint32m4_t vd, vuint32m2_t vs2, size_t vl); +vuint32m8_t __riscv_vaesdm_vs_tu (vuint32m8_t vd, vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vaesdm_vv_tu (vuint32m4_t vd, vuint32m4_t vs2, size_t vl); +vuint32m4_t __riscv_vaesdm_vs_tu (vuint32m4_t vd, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vaesdm_vs_tu (vuint32m8_t vd, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vaesdm_vv_tu (vuint32m8_t vd, vuint32m8_t vs2, size_t vl); +---- + +[[policy-variant-overloaded]] +==== Vector AES-128 Forward KeySchedule generation + +[,c] +---- +vuint32mf2_t __riscv_vaeskf1_tu (vuint32mf2_t vd, vuint32mf2_t vs2, size_t uimm, size_t vl); +vuint32m1_t __riscv_vaeskf1_tu (vuint32m1_t vd, vuint32m1_t vs2, size_t uimm, size_t vl); +vuint32m2_t __riscv_vaeskf1_tu (vuint32m2_t vd, vuint32m2_t vs2, size_t uimm, size_t vl); +vuint32m4_t __riscv_vaeskf1_tu (vuint32m4_t vd, vuint32m4_t vs2, size_t uimm, size_t vl); +vuint32m8_t __riscv_vaeskf1_tu (vuint32m8_t vd, vuint32m8_t vs2, size_t uimm, size_t vl); +vuint32mf2_t __riscv_vaeskf2_tu (vuint32mf2_t vd, vuint32mf2_t vs2, size_t uimm, size_t vl); +vuint32m1_t __riscv_vaeskf2_tu (vuint32m1_t vd, vuint32m1_t vs2, size_t uimm, size_t vl); +vuint32m2_t __riscv_vaeskf2_tu (vuint32m2_t vd, vuint32m2_t vs2, size_t uimm, size_t vl); +vuint32m4_t __riscv_vaeskf2_tu (vuint32m4_t vd, vuint32m4_t vs2, size_t uimm, size_t vl); +vuint32m8_t __riscv_vaeskf2_tu (vuint32m8_t vd, vuint32m8_t vs2, size_t uimm, size_t vl); +---- + +[[policy-variant-overloaded]] +==== Vector AES round zero + +[,c] +---- +vuint32mf2_t __riscv_vaesz_tu (vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vaesz_tu (vuint32m1_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m2_t __riscv_vaesz_tu (vuint32m2_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m4_t __riscv_vaesz_tu (vuint32m4_t vd, vuint32mf2_t 
vs2, size_t vl); +vuint32m8_t __riscv_vaesz_tu (vuint32m8_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vaesz_tu (vuint32m1_t vd, vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vaesz_tu (vuint32m2_t vd, vuint32m1_t vs2, size_t vl); +vuint32m4_t __riscv_vaesz_tu (vuint32m4_t vd, vuint32m1_t vs2, size_t vl); +vuint32m8_t __riscv_vaesz_tu (vuint32m8_t vd, vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vaesz_tu (vuint32m2_t vd, vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vaesz_tu (vuint32m4_t vd, vuint32m2_t vs2, size_t vl); +vuint32m8_t __riscv_vaesz_tu (vuint32m8_t vd, vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vaesz_tu (vuint32m4_t vd, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vaesz_tu (vuint32m8_t vd, vuint32m4_t vs2, size_t vl); +---- diff --git a/auto-generated/vector-crypto/policy_funcs/overloaded_intrinsic_funcs/04_zvknh_-_nist_suite:_vector_sha-2_secure_hash.adoc b/auto-generated/vector-crypto/policy_funcs/overloaded_intrinsic_funcs/04_zvknh_-_nist_suite:_vector_sha-2_secure_hash.adoc new file mode 100644 index 000000000..118223db5 --- /dev/null +++ b/auto-generated/vector-crypto/policy_funcs/overloaded_intrinsic_funcs/04_zvknh_-_nist_suite:_vector_sha-2_secure_hash.adoc @@ -0,0 +1,43 @@ + +=== Zvknh - NIST Suite: Vector SHA-2 Secure Hash + +[[policy-variant-overloaded]] +==== Vector SHA-2 message schedule + +[,c] +---- +vuint32mf2_t __riscv_vsha2ms_tu (vuint32mf2_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl); +vuint32m1_t __riscv_vsha2ms_tu (vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl); +vuint32m2_t __riscv_vsha2ms_tu (vuint32m2_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl); +vuint32m4_t __riscv_vsha2ms_tu (vuint32m4_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl); +vuint32m8_t __riscv_vsha2ms_tu (vuint32m8_t vd, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl); +vuint64m1_t __riscv_vsha2ms_tu (vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl); +vuint64m2_t __riscv_vsha2ms_tu (vuint64m2_t vd, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl); +vuint64m4_t __riscv_vsha2ms_tu (vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl); +vuint64m8_t __riscv_vsha2ms_tu (vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl); +---- + +[[policy-variant-overloaded]] +==== Vector SHA-2 two rounds of compression + +[,c] +---- +vuint32mf2_t __riscv_vsha2ch_tu (vuint32mf2_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl); +vuint32m1_t __riscv_vsha2ch_tu (vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl); +vuint32m2_t __riscv_vsha2ch_tu (vuint32m2_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl); +vuint32m4_t __riscv_vsha2ch_tu (vuint32m4_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl); +vuint32m8_t __riscv_vsha2ch_tu (vuint32m8_t vd, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl); +vuint64m1_t __riscv_vsha2ch_tu (vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl); +vuint64m2_t __riscv_vsha2ch_tu (vuint64m2_t vd, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl); +vuint64m4_t __riscv_vsha2ch_tu (vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl); +vuint64m8_t __riscv_vsha2ch_tu (vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl); +vuint32mf2_t __riscv_vsha2cl_tu (vuint32mf2_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl); +vuint32m1_t __riscv_vsha2cl_tu (vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl); +vuint32m2_t __riscv_vsha2cl_tu (vuint32m2_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl); +vuint32m4_t __riscv_vsha2cl_tu (vuint32m4_t vd, vuint32m4_t 
vs2, vuint32m4_t vs1, size_t vl); +vuint32m8_t __riscv_vsha2cl_tu (vuint32m8_t vd, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl); +vuint64m1_t __riscv_vsha2cl_tu (vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl); +vuint64m2_t __riscv_vsha2cl_tu (vuint64m2_t vd, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl); +vuint64m4_t __riscv_vsha2cl_tu (vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl); +vuint64m8_t __riscv_vsha2cl_tu (vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl); +---- diff --git a/auto-generated/vector-crypto/policy_funcs/overloaded_intrinsic_funcs/05_zvksed_-_shangmi_suite:_sm4_block_cipher.adoc b/auto-generated/vector-crypto/policy_funcs/overloaded_intrinsic_funcs/05_zvksed_-_shangmi_suite:_sm4_block_cipher.adoc new file mode 100644 index 000000000..304925935 --- /dev/null +++ b/auto-generated/vector-crypto/policy_funcs/overloaded_intrinsic_funcs/05_zvksed_-_shangmi_suite:_sm4_block_cipher.adoc @@ -0,0 +1,40 @@ + +=== Zvksed - ShangMi Suite: SM4 Block Cipher + +[[policy-variant-overloaded]] +==== Vector SM4 KeyExpansion + +[,c] +---- +vuint32mf2_t __riscv_vsm4k_tu (vuint32mf2_t vd, vuint32mf2_t vs2, size_t uimm, size_t vl); +vuint32m1_t __riscv_vsm4k_tu (vuint32m1_t vd, vuint32m1_t vs2, size_t uimm, size_t vl); +vuint32m2_t __riscv_vsm4k_tu (vuint32m2_t vd, vuint32m2_t vs2, size_t uimm, size_t vl); +vuint32m4_t __riscv_vsm4k_tu (vuint32m4_t vd, vuint32m4_t vs2, size_t uimm, size_t vl); +vuint32m8_t __riscv_vsm4k_tu (vuint32m8_t vd, vuint32m8_t vs2, size_t uimm, size_t vl); +---- + +[[policy-variant-overloaded]] +==== Vector SM4 Rounds + +[,c] +---- +vuint32mf2_t __riscv_vsm4r_vv_tu (vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); +vuint32mf2_t __riscv_vsm4r_vs_tu (vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vsm4r_vs_tu (vuint32m1_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m2_t __riscv_vsm4r_vs_tu (vuint32m2_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m4_t __riscv_vsm4r_vs_tu (vuint32m4_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m8_t __riscv_vsm4r_vs_tu (vuint32m8_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vsm4r_vv_tu (vuint32m1_t vd, vuint32m1_t vs2, size_t vl); +vuint32m1_t __riscv_vsm4r_vs_tu (vuint32m1_t vd, vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vsm4r_vs_tu (vuint32m2_t vd, vuint32m1_t vs2, size_t vl); +vuint32m4_t __riscv_vsm4r_vs_tu (vuint32m4_t vd, vuint32m1_t vs2, size_t vl); +vuint32m8_t __riscv_vsm4r_vs_tu (vuint32m8_t vd, vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vsm4r_vv_tu (vuint32m2_t vd, vuint32m2_t vs2, size_t vl); +vuint32m2_t __riscv_vsm4r_vs_tu (vuint32m2_t vd, vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vsm4r_vs_tu (vuint32m4_t vd, vuint32m2_t vs2, size_t vl); +vuint32m8_t __riscv_vsm4r_vs_tu (vuint32m8_t vd, vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vsm4r_vv_tu (vuint32m4_t vd, vuint32m4_t vs2, size_t vl); +vuint32m4_t __riscv_vsm4r_vs_tu (vuint32m4_t vd, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vsm4r_vs_tu (vuint32m8_t vd, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vsm4r_vv_tu (vuint32m8_t vd, vuint32m8_t vs2, size_t vl); +---- diff --git a/auto-generated/vector-crypto/policy_funcs/overloaded_intrinsic_funcs/06_zvksh_-_shangmi_suite:_sm3_secure_hash.adoc b/auto-generated/vector-crypto/policy_funcs/overloaded_intrinsic_funcs/06_zvksh_-_shangmi_suite:_sm3_secure_hash.adoc new file mode 100644 index 000000000..b907f2879 --- /dev/null +++ 
b/auto-generated/vector-crypto/policy_funcs/overloaded_intrinsic_funcs/06_zvksh_-_shangmi_suite:_sm3_secure_hash.adoc @@ -0,0 +1,26 @@ + +=== Zvksh - ShangMi Suite: SM3 Secure Hash + +[[policy-variant-overloaded]] +==== Vector SM3 Message Expansion + +[,c] +---- +vuint32mf2_t __riscv_vsm3me_tu (vuint32mf2_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl); +vuint32m1_t __riscv_vsm3me_tu (vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl); +vuint32m2_t __riscv_vsm3me_tu (vuint32m2_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl); +vuint32m4_t __riscv_vsm3me_tu (vuint32m4_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl); +vuint32m8_t __riscv_vsm3me_tu (vuint32m8_t vd, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl); +---- + +[[policy-variant-overloaded]] +==== Vector SM3 Compression + +[,c] +---- +vuint32mf2_t __riscv_vsm3c_tu (vuint32mf2_t vd, vuint32mf2_t vs2, size_t uimm, size_t vl); +vuint32m1_t __riscv_vsm3c_tu (vuint32m1_t vd, vuint32m1_t vs2, size_t uimm, size_t vl); +vuint32m2_t __riscv_vsm3c_tu (vuint32m2_t vd, vuint32m2_t vs2, size_t uimm, size_t vl); +vuint32m4_t __riscv_vsm3c_tu (vuint32m4_t vd, vuint32m4_t vs2, size_t uimm, size_t vl); +vuint32m8_t __riscv_vsm3c_tu (vuint32m8_t vd, vuint32m8_t vs2, size_t uimm, size_t vl); +---- diff --git a/rvv-intrinsic-generator/Makefile b/rvv-intrinsic-generator/Makefile index 5044f51ff..c789e419a 100644 --- a/rvv-intrinsic-generator/Makefile +++ b/rvv-intrinsic-generator/Makefile @@ -55,6 +55,8 @@ MAIN := rvv_intrinsic_gen.main BF16_INST := $(RVV_INTRINSIC_GEN_PATH)/bfloat16_inst.py # Script to clang-format the auto-generated adoc files CLANG_FORMAT_ADOC = clang_format_autogen +# Extra flags specified when calling rvv_intrinsic_gen.main +EXTRA_FLAG := # Main output directory is default to auto-generated OUTPUT_DIR := ../auto-generated # Derives output directory for each set of intrinsics @@ -164,50 +166,38 @@ gen-gnu-test: gnu-overloaded-test gnu-non-overloaded-test # Generate all-in-one document for non-overloaded intrinsics non-overloaded-doc: - $(call gen_doc,$(DIR),intrinsic_funcs.adoc,$@,) - $(call gen_doc,$(POLICY_DIR),intrinsic_funcs.adoc,$@,--has-policy) - $(call clang_format_adoc, --file, $(DIR)/intrinsic_funcs.adoc) - $(call clang_format_adoc, --file, $(POLICY_DIR)/intrinsic_funcs.adoc) + $(call gen_doc,$(DIR),intrinsic_funcs.md,$@,$(EXTRA_FLAG)) + $(call gen_doc,$(POLICY_DIR),intrinsic_funcs.md,$@,--has-policy $(EXTRA_FLAG)) # Generate grouped documents for non-overloaded intrinsics non-overloaded-docs: - $(call gen_docs,$(DIR),intrinsic_funcs,$@,) - $(call gen_docs,$(POLICY_DIR),intrinsic_funcs,$@,--has-policy) - $(call clang_format_adoc, --folder, $(DIR)/intrinsic_funcs) - $(call clang_format_adoc, --folder, $(POLICY_DIR)/intrinsic_funcs) + $(call gen_docs,$(DIR),intrinsic_funcs,$@,$(EXTRA_FLAG)) + $(call gen_docs,$(POLICY_DIR),intrinsic_funcs,$@,--has-policy $(EXTRA_FLAG)) # Generate all-in-one document for overloaded intrinsics overloaded-doc: - $(call gen_doc,$(DIR),overloaded_intrinsic_funcs.adoc,$@,) - $(call gen_doc,$(POLICY_DIR),overloaded_intrinsic_funcs.adoc,$@,--has-policy) - $(call clang_format_adoc, --file, $(DIR)/overloaded_intrinsic_funcs.adoc) - $(call clang_format_adoc, --file, $(POLICY_DIR)/overloaded_intrinsic_funcs.adoc) + $(call gen_doc,$(DIR),overloaded_intrinsic_funcs.md,$@,$(EXTRA_FLAG)) + $(call gen_doc,$(POLICY_DIR),overloaded_intrinsic_funcs.md,$@,--has-policy $(EXTRA_FLAG)) # Generate grouped documents for overloaded intrinsics overloaded-docs: - $(call 
gen_docs,$(DIR),overloaded_intrinsic_funcs,$@,) - $(call gen_docs,$(POLICY_DIR),overloaded_intrinsic_funcs,$@,--has-policy) - $(call clang_format_adoc, --folder, $(DIR)/overloaded_intrinsic_funcs) - $(call clang_format_adoc, --folder, $(POLICY_DIR)/overloaded_intrinsic_funcs) + $(call gen_docs,$(DIR),overloaded_intrinsic_funcs,$@,$(EXTRA_FLAG)) + $(call gen_docs,$(POLICY_DIR),overloaded_intrinsic_funcs,$@,--has-policy $(EXTRA_FLAG)) # Generate non-overloaded intrinsic testing C source files non-overloaded-test: - $(call gen_tests,$(DIR)/api-testing,non-overloaded-test,) - $(call gen_tests,$(POLICY_DIR)/api-testing,non-overloaded-test,--has-policy) - clang-format -i $(DIR)/api-testing/* - clang-format -i $(POLICY_DIR)/api-testing/* + $(call gen_tests,$(DIR)/api-testing,non-overloaded-test,$(EXTRA_FLAG)) + $(call gen_tests,$(POLICY_DIR)/api-testing,non-overloaded-test,--has-policy $(EXTRA_FLAG)) # Generate overloaded intrinsic testing C source files overloaded-test: - $(call gen_tests,$(DIR)/overloaded-api-testing,overloaded-test,) - $(call gen_tests,$(POLICY_DIR)/overloaded-api-testing,overloaded-test,--has-policy) - clang-format -i $(DIR)/overloaded-api-testing/* - clang-format -i $(POLICY_DIR)/overloaded-api-testing/* + $(call gen_tests,$(DIR)/overloaded-api-testing,overloaded-test,$(EXTRA_FLAG)) + $(call gen_tests,$(POLICY_DIR)/overloaded-api-testing,overloaded-test,--has-policy $(EXTRA_FLAG)) # Generate non-overloaded intrinsic testing C source files llvm-non-overloaded-test: - $(call gen_tests,$(DIR)/llvm-api-tests,non-overloaded-test,--toolchain-type llvm) - $(call gen_tests,$(POLICY_DIR)/llvm-api-tests,non-overloaded-test,--toolchain-type llvm --has-policy) + $(call gen_tests,$(DIR)/llvm-api-tests,non-overloaded-test,--toolchain-type llvm $(EXTRA_FLAG)) + $(call gen_tests,$(POLICY_DIR)/llvm-api-tests,non-overloaded-test,--toolchain-type llvm --has-policy $(EXTRA_FLAG)) $(call replace_float, $(DIR)/llvm-api-tests) $(call replace_float, $(POLICY_DIR)/llvm-api-tests) clang-format -i $(DIR)/llvm-api-tests/* @@ -215,8 +205,8 @@ llvm-non-overloaded-test: # Generate overloaded intrinsic testing C source files llvm-overloaded-test: - $(call gen_tests,$(DIR)/llvm-overloaded-tests,overloaded-test,--toolchain-type llvm) - $(call gen_tests,$(POLICY_DIR)/llvm-overloaded-tests,overloaded-test,--toolchain-type llvm --has-policy) + $(call gen_tests,$(DIR)/llvm-overloaded-tests,overloaded-test,--toolchain-type llvm $(EXTRA_FLAG)) + $(call gen_tests,$(POLICY_DIR)/llvm-overloaded-tests,overloaded-test,--toolchain-type llvm --has-policy $(EXTRA_FLAG)) $(call replace_float, $(DIR)/llvm-overloaded-tests) $(call replace_float, $(POLICY_DIR)/llvm-overloaded-tests) clang-format -i $(DIR)/llvm-overloaded-tests/* @@ -292,18 +282,15 @@ bf16-llvm-overloaded-test: # Generate the adaptor header for v0.10 non-policy-compatible-header: - $(call gen_doc,$(DIR)/rvv-v0p10-compatible-headers,non-policy.h,non-overloaded-compatible-header,) + $(call gen_doc,$(DIR)/rvv-v0p10-compatible-headers,non-policy.h,non-overloaded-compatible-header,$(EXTRA_FLAG)) policy-compatible-header: - $(call gen_doc,$(DIR)/rvv-v0p10-compatible-headers,policy.h,non-overloaded-compatible-header,--has-policy) - clang-format -i $(DIR)/rvv-v0p10-compatible-headers/* + $(call gen_doc,$(DIR)/rvv-v0p10-compatible-headers,policy.h,non-overloaded-compatible-header,--has-policy $(EXTRA_FLAG)) non-policy-overloaded-compatible-header: - $(call gen_doc,$(DIR)/rvv-v0p10-compatible-headers,overloaded-non-policy.h,overloaded-compatible-header,) - clang-format 
-i $(DIR)/rvv-v0p10-compatible-headers/* + $(call gen_doc,$(DIR)/rvv-v0p10-compatible-headers,overloaded-non-policy.h,overloaded-compatible-header,$(EXTRA_FLAG)) policy-overloaded-compatible-header: - $(call gen_doc,$(DIR)/rvv-v0p10-compatible-headers,overloaded-policy.h,overloaded-compatible-header,--has-policy) - clang-format -i $(DIR)/rvv-v0p10-compatible-headers/* + $(call gen_doc,$(DIR)/rvv-v0p10-compatible-headers,overloaded-policy.h,overloaded-compatible-header,--has-policy $(EXTRA_FLAG)) ############################################################################### @@ -359,6 +346,15 @@ diff-autogen: $(call check_defined, TEST_DIR, output directory for documents/tests generation) rm -rf ${abspath ${TEST_DIR}} make OUTPUT_DIR=${TEST_DIR} + make EXTRA_FLAG=--gen-vector-crypto OUTPUT_DIR=${TEST_DIR}/vector-crypto + +# Remove the redundant folder created for vector crypto. This line is +# needed because the compatible-header targets in this Makefile create the +# folder before running the script. Vector crypto, however, does not need a +# compatible header because the intrinsics did not exist before +# v0.10. + rm -rf ${TEST_DIR}/vector-crypto/rvv-v0p10-compatible-headers + diff -qr ${TEST_DIR} ${GOLDEN_DIR} ############################################################################### diff --git a/rvv-intrinsic-generator/rvv_intrinsic_gen/constants.py b/rvv-intrinsic-generator/rvv_intrinsic_gen/constants.py index e2ae21964..5d3f20c6c 100644 --- a/rvv-intrinsic-generator/rvv_intrinsic_gen/constants.py +++ b/rvv-intrinsic-generator/rvv_intrinsic_gen/constants.py @@ -28,6 +28,7 @@ NSEWS = [16, 32, 64] TYPES = ["float", "int", "uint"] ITYPES = ["int", "uint"] +UITYPE = ["uint"] FTYPES = ["float"] MTYPES = ["bool"] MLENS = [1, 2, 4, 8, 16, 32, 64] diff --git a/rvv-intrinsic-generator/rvv_intrinsic_gen/generator.py b/rvv-intrinsic-generator/rvv_intrinsic_gen/generator.py index 1f4f9ada9..c2e27f798 100644 --- a/rvv-intrinsic-generator/rvv_intrinsic_gen/generator.py +++ b/rvv-intrinsic-generator/rvv_intrinsic_gen/generator.py @@ -257,7 +257,8 @@ def get_overloaded_op_name(name): overloaded_name = "_".join([sn[0], sn[1], sn[-1]]) elif any(op in name for op in [ "vzext", "vsext", "vwadd", "vwsub", "vfwadd", "vfwsub", "vwadd", - "vwsub", "vfwadd", "vfwsub", "vmv", "vfmv" + "vwsub", "vfwadd", "vfwsub", "vmv", "vfmv", "vsm4r", "vaesef", "vaesem", + "vaesdf", "vaesdm" ]): # 2. 
compiler can not distinguish *.wx and *.vx, need encode them in # suffix, for example: @@ -455,7 +456,7 @@ def inst_group_prologue(self): def inst_group_epilogue(self): return "" - def write_file_header(self, has_float_type, has_bfloat16_type): + def write_file_header(self, has_float_type, has_bfloat16_type, name): #pylint: disable=line-too-long int_llvm_header = r"""// REQUIRES: riscv-registered-target // RUN: %clang_cc1 -triple riscv64 -target-feature +v -disable-O0-optnone \ @@ -484,9 +485,38 @@ def write_file_header(self, has_float_type, has_bfloat16_type): r""" -Wno-psabi -O3 -fno-schedule-insns -fno-schedule-insns2" } */ """) + + vector_crypto_llvm_header = (r"""// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zvl512b \ +// RUN: -target-feature +zvbb \ +// RUN: -target-feature +zvbc \ +// RUN: -target-feature +zvkg \ +// RUN: -target-feature +zvkned \ +// RUN: -target-feature +zvknhb \ +// RUN: -target-feature +zvksed \ +// RUN: -target-feature +zvksh -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +""") + + def is_vector_crypto_inst(name): + vector_crypto_inst = [ + "vandn", "vbrev", "vbrev8", "vrev8", "vclz", "vctz", "vrol", "vror", + "vwsll", "vclmul", "vclmulh", "vghsh", "vgmul", "vaesef", "vaesem", + "vaesdf", "vaesdm", "vaeskf1", "vaeskf2", "vaesz", "vsha2ms", + "vsha2ch", "vsha2cl", "vsm4k", "vsm4r", "vsm3me", "vsm3c" + ] + for inst in vector_crypto_inst: + if inst in name: + return True + return False + if self.toolchain_type == ToolChainType.LLVM: if has_bfloat16_type: self.fd.write(bfloat16_llvm_header) + elif is_vector_crypto_inst(name): + self.fd.write(vector_crypto_llvm_header) elif has_float_type: self.fd.write(float_llvm_header) else: @@ -542,6 +572,7 @@ def func(self, inst_info, name, return_type, **kwargs): # For "vxrm" parameter of the fixed-point intrinsics, value for it must be # an immediate. func_decl = func_decl.replace(", unsigned int vxrm", "") + func_decl = func_decl.replace(", size_t uimm", "") # For "frm" parameter of the floating-point intrinsics, value for it must # be an immediate. @@ -566,7 +597,7 @@ def func(self, inst_info, name, return_type, **kwargs): has_float_type = True if header: - self.write_file_header(has_float_type, has_bfloat16_type) + self.write_file_header(has_float_type, has_bfloat16_type, name) def output_call_arg(arg_name, type_name): if ((name.startswith("vget") or name.startswith("vset")) \ @@ -580,6 +611,9 @@ def output_call_arg(arg_name, type_name): if arg_name == "frm": return "__RISCV_FRM_RNE" + if arg_name == "uimm": + return "0" + return arg_name # Write test func body. 
diff --git a/rvv-intrinsic-generator/rvv_intrinsic_gen/main.py b/rvv-intrinsic-generator/rvv_intrinsic_gen/main.py index f9b84daf1..fe0205d1b 100644 --- a/rvv-intrinsic-generator/rvv_intrinsic_gen/main.py +++ b/rvv-intrinsic-generator/rvv_intrinsic_gen/main.py @@ -24,6 +24,7 @@ import importlib.util import inspect import inst +import vector_crypto_inst import generator from enums import ToolChainType @@ -104,6 +105,7 @@ class GenTypes: parser.add_argument("--skip-default-inst", default=False, action="store_true") parser.add_argument("--vendor-generator-script") parser.add_argument("--vendor-generator-name") + parser.add_argument("--gen-vector-crypto", default=False, action="store_true") parser.add_argument("--out") args = parser.parse_args() @@ -137,6 +139,12 @@ class GenTypes: GenTypes.NON_OVERLOADED_COMPATIBLE_HEADER, GenTypes.OVERLOADED_COMPATIBLE_HEADER ]: + # Vector crypto does not need a compatible header because the + # intrinsics did not exist before v0.10 + if mode in (GenTypes.NON_OVERLOADED_COMPATIBLE_HEADER, + GenTypes.OVERLOADED_COMPATIBLE_HEADER) and\ + args.gen_vector_crypto: + return with open(args.out, "w", encoding="utf-8") as f: if mode == GenTypes.NON_OVERLOADED_DOC: g = generator.DocGenerator(f, True, args.has_policy) @@ -150,7 +158,10 @@ class GenTypes: assert False if not args.skip_default_inst: - inst.gen(g) + if args.gen_vector_crypto: + vector_crypto_inst.gen(g) + else: + inst.gen(g) else: print("Skipping default RVV instructions (--skip-default-inst)") if vendor_gen is not None: @@ -173,7 +184,10 @@ class GenTypes: else: assert False if not args.skip_default_inst: - inst.gen(g) + if args.gen_vector_crypto: + vector_crypto_inst.gen(g) + else: + inst.gen(g) else: print("Skipping default RVV instructions (--skip-default-inst)") if vendor_gen is not None: diff --git a/rvv-intrinsic-generator/rvv_intrinsic_gen/templates/vector_crypto_template.py b/rvv-intrinsic-generator/rvv_intrinsic_gen/templates/vector_crypto_template.py new file mode 100644 index 000000000..28b5a466a --- /dev/null +++ b/rvv-intrinsic-generator/rvv_intrinsic_gen/templates/vector_crypto_template.py @@ -0,0 +1,180 @@ +""" +Template for rendering vector crypto intrinsics. +Current version is for v20230531. 
+https://github.com/riscv/riscv-crypto/blob/v20230531/doc/vector/riscv-crypto-spec-vector.adoc +""" + +from utils import prod +from utils import TypeHelper +from enums import InstInfo +from enums import InstType +from enums import ExtraAttr + +operand_mnemonic_dict = {} +# Zvbb: Vector Bit-manipulation used in Cryptography +operand_mnemonic_dict["vandn"] = ["vv", "vx"] +operand_mnemonic_dict["vbrev"] = ["v"] +operand_mnemonic_dict["vbrev8"] = ["v"] +operand_mnemonic_dict["vrev8"] = ["v"] +operand_mnemonic_dict["vclz"] = ["v"] +operand_mnemonic_dict["vctz"] = ["v"] +operand_mnemonic_dict["vcpop"] = ["v"] +operand_mnemonic_dict["vrol"] = ["vv", "vx"] +operand_mnemonic_dict["vror"] = ["vv", "vx"] # saving the `vi` variant +operand_mnemonic_dict["vwsll"] = ["vv", "vx"] # saving the `vi` variant +# Zvbc: Vector Carryless Multiplication +operand_mnemonic_dict["vclmul"] = ["vv", "vx"] +operand_mnemonic_dict["vclmulh"] = ["vv", "vx"] +# Zvkg: Vector GCM/GMAC +operand_mnemonic_dict["vghsh"] = ["vv"] +operand_mnemonic_dict["vgmul"] = ["vv"] +# Zvkned: NIST Suite: Vector AES Block Cipher +operand_mnemonic_dict["vaesef"] = ["vv", "vs"] +operand_mnemonic_dict["vaesem"] = ["vv", "vs"] +operand_mnemonic_dict["vaesdf"] = ["vv", "vs"] +operand_mnemonic_dict["vaesdm"] = ["vv", "vs"] +operand_mnemonic_dict["vaeskf1"] = ["vi"] +operand_mnemonic_dict["vaeskf2"] = ["vi"] +operand_mnemonic_dict["vaesz"] = ["vs"] +# Zvknh: NIST Suite: Vector SHA-2 Secure Hash +operand_mnemonic_dict["vsha2ms"] = ["vv"] +operand_mnemonic_dict["vsha2ch"] = ["vv"] +operand_mnemonic_dict["vsha2cl"] = ["vv"] +# Zvksed: ShangMi Suite: SM4 Block Cipher +operand_mnemonic_dict["vsm4k"] = ["vi"] +operand_mnemonic_dict["vsm4r"] = ["vv", "vs"] +# Zvksh: ShangMi Suite: SM3 Secure Hash +operand_mnemonic_dict["vsm3me"] = ["vv"] +operand_mnemonic_dict["vsm3c"] = ["vi"] + + +def has_vd_input(name): + has_vd_input_inst_set = { + "vghsh", "vgmul", "vaesef", "vaesem", "vaesdf", "vaesdm", "vaesz", + "vsha2ms", "vsha2ch", "vsha2cl", "vsm4r", "vsm3c", "vaeskf2" + } + + return name in has_vd_input_inst_set + + +def has_vs1_input(name): + has_vs1_input_inst_set = { + "vandn", "vrol", "vror", "vwsll", "vclmul", "vclmulh", "vghsh", "vsha2ms", + "vsha2ch", "vsha2cl", "vsm3me" + } + + return name in has_vs1_input_inst_set + + +def has_rs1_input(name): + has_rs1_input_inst_set = { + "vandn", "vrol", "vror", "vwsll", "vclmul", "vclmulh" + } + + return name in has_rs1_input_inst_set + + +def render(G, op_list, type_list, sew_list, lmul_list, decorator_list): + #pylint: disable=invalid-name + # FIXME: Renaming 'G' to 'g' all at once later. 
+ G.inst_group_prologue() + + for decorator in decorator_list: + decorator.write_text_header(G) + for args in prod(OP=op_list, TYPE=type_list, SEW=sew_list, LMUL=lmul_list): + op = args["OP"] + for operand_mnemonic in operand_mnemonic_dict[op]: + if operand_mnemonic in ("vv", "vs"): + if op == "vwsll": + inst_info = InstInfo.get(args, decorator, InstType.WVV, + ExtraAttr.NO_ATTR) + else: + inst_info = InstInfo.get(args, decorator, InstType.VV, + ExtraAttr.NO_ATTR) + elif operand_mnemonic == "vx": + if op == "vwsll": + inst_info = InstInfo.get(args, decorator, InstType.WVX, + ExtraAttr.NO_ATTR) + else: + inst_info = InstInfo.get(args, decorator, InstType.VX, + ExtraAttr.NO_ATTR) + elif operand_mnemonic == "vi": + inst_info = InstInfo.get(args, decorator, InstType.VI, + ExtraAttr.NO_ATTR) + elif operand_mnemonic == "v": + inst_info = InstInfo.get(args, decorator, InstType.V, + ExtraAttr.NO_ATTR) + else: + assert False, "Unreachable, unrecognized mnemonic" + + args["MNEMONIC"] = operand_mnemonic + type_helper = TypeHelper(**args) + kwargs = {} + if op == "vwsll": + kwargs["return_type"] = type_helper.wv + else: + kwargs["return_type"] = type_helper.v + if op == "vwsll": + kwargs = { + **kwargs, + **decorator.mask_args(type_helper.m, type_helper.wv) + } + else: + kwargs = { + **kwargs, + **decorator.mask_args(type_helper.m, type_helper.v) + } + # If vd is already in the input parameter, we don't need to emit another + # parameter when tail policy is TU. + if has_vd_input(op): + kwargs["vd"] = type_helper.v + else: + if op == "vwsll": + kwargs = {**kwargs, **decorator.tu_dest_args(type_helper.wv)} + else: + kwargs = {**kwargs, **decorator.tu_dest_args(type_helper.v)} + + kwargs["vs2"] = type_helper.v + + if operand_mnemonic == "vv" and has_vs1_input(op): + kwargs["vs1"] = type_helper.v + if operand_mnemonic == "vx" and has_rs1_input(op): + if op in ["vwsll", "vrol", "vror"]: + kwargs["rs1"] = type_helper.size_t + else: + kwargs["rs1"] = type_helper.s + if "vi" in operand_mnemonic_dict[op]: + kwargs["uimm"] = type_helper.size_t + + kwargs["vl"] = type_helper.size_t + + if operand_mnemonic == "vs": + starting_from_lmul_index = lmul_list.index(args["LMUL"]) + # print(starting_from_lmul_index) + for i in range(starting_from_lmul_index, len(lmul_list)): + if args["LMUL"] == 8: + continue + + kwargs["return_type"] =\ + f"v{args['TYPE']}{args['SEW']}m{lmul_list[i]}_t" + kwargs["vd"] = f"v{args['TYPE']}{args['SEW']}m{lmul_list[i]}_t" + kwargs["vs2"] = f"v{args['TYPE']}{args['SEW']}m{args['LMUL']}_t" + func_name = "{OP}_{MNEMONIC}_".format_map(args) +\ + f"{args['TYPE']}{args['SEW']}m{args['LMUL']}_" +\ + f"{args['TYPE']}{args['SEW']}m{lmul_list[i]}" + G.func(inst_info, name=func_name + decorator.func_suffix, **kwargs) + else: + if op == "vwsll": + G.func( + inst_info, + name="{OP}_{MNEMONIC}_{TYPE}{WSEW}m{WLMUL}".format_map(args) + + decorator.func_suffix, + **kwargs) + else: + G.func( + inst_info, + name="{OP}_{MNEMONIC}_{TYPE}{SEW}m{LMUL}".format_map(args) + + decorator.func_suffix, + **kwargs) + + G.inst_group_epilogue() diff --git a/rvv-intrinsic-generator/rvv_intrinsic_gen/vector_crypto_inst.py b/rvv-intrinsic-generator/rvv_intrinsic_gen/vector_crypto_inst.py new file mode 100644 index 000000000..7635912e1 --- /dev/null +++ b/rvv-intrinsic-generator/rvv_intrinsic_gen/vector_crypto_inst.py @@ -0,0 +1,220 @@ +""" +Declares the vector crypto intrinsics through the vector crypto template. 
+""" + +from intrinsic_decorator import IntrinsicDecorators +from templates import vector_crypto_template +from constants import LMULS, WLMULS, SEWS, WSEWS, UITYPE + + +def gen(g): + decorators = IntrinsicDecorators(g.has_tail_policy) + + g.start_group("Zvbb - Vector Bit-manipulation used in Cryptography") + + g.function_group( + vector_crypto_template, + "Vector Bit-manipulation used in Cryptography - Bitwise And-Not", + "", # FIXME: We probably have a separate document for vector-crypto + ["vandn"], + UITYPE, + SEWS, + LMULS, + decorators.has_masking_maskedoff_policy) + + g.function_group( + vector_crypto_template, + "Vector Basic Bit-manipulation - Reverse", + "", # FIXME: We probably have a separate document for vector-crypto + ["vbrev", "vbrev8", "vrev8"], + UITYPE, + SEWS, + LMULS, + decorators.has_masking_maskedoff_policy) + + g.function_group( + vector_crypto_template, + "Vector Basic Bit-manipulation - Count Bits", + "", # FIXME: We probably have a separate document for vector-crypto + ["vclz", "vctz"], + UITYPE, + SEWS, + LMULS, + decorators.has_masking_maskedoff_policy) + + g.function_group( + vector_crypto_template, + "Vector Basic Bit-manipulation - Vector Population Count", + "", # FIXME: We probably have a separate document for vector-crypto + ["vcpop"], + UITYPE, + SEWS, + LMULS, + decorators.has_masking_maskedoff_policy) + + g.function_group( + vector_crypto_template, + "Vector Bit-manipulation used in Cryptography - Rotate", + "", # FIXME: We probably have a separate document for vector-crypto + ["vrol", "vror"], + UITYPE, + SEWS, + LMULS, + decorators.has_masking_maskedoff_policy) + + g.function_group( + vector_crypto_template, + "Vector Basic Bit-manipulation used - Widening Shift", + "", # FIXME: We probably have a separate document for vector-crypto + ["vwsll"], + UITYPE, + WSEWS, + WLMULS, + decorators.has_masking_maskedoff_policy) + + #################################################################### + + g.start_group("Zvbc - Vector Carryless Multiplication") + + g.function_group( + vector_crypto_template, + "Vector Carryless Multiplication", + "", # FIXME: We probably have a separate document for vector-crypto + ["vclmul", "vclmulh"], + UITYPE, + [64], + LMULS, + decorators.has_masking_maskedoff_policy) + + #################################################################### + + g.start_group("Zvkg - Vector GCM/GMAC") + + g.function_group( + vector_crypto_template, + "Vector GCM/GMAC", + "", # FIXME: We probably have a separate document for vector-crypto + ["vghsh", "vgmul"], + UITYPE, + [32], + LMULS, + decorators.has_no_masking_policy) + + #################################################################### + + g.start_group("Zvkned - NIST Suite: Vector AES Block Cipher") + + g.function_group( + vector_crypto_template, + "Vector AES Encryption", + "", # FIXME: We probably have a separate document for vector-crypto + ["vaesef", "vaesem"], + UITYPE, + [32], + LMULS, + decorators.has_no_masking_policy) + + g.function_group( + vector_crypto_template, + "Vector AES Decryption", + "", # FIXME: We probably have a separate document for vector-crypto + ["vaesdf", "vaesdm"], + UITYPE, + [32], + LMULS, + decorators.has_no_masking_policy) + + g.function_group( + vector_crypto_template, + "Vector AES-128 Forward KeySchedule generation", + "", # FIXME: We probably have a separate document for vector-crypto + ["vaeskf1", "vaeskf2"], + UITYPE, + [32], + LMULS, + decorators.has_no_masking_policy) + + g.function_group( + vector_crypto_template, + "Vector AES round zero", + "", # 
FIXME: We probably have a separate document for vector-crypto + ["vaesz"], + UITYPE, + [32], + LMULS, + decorators.has_no_masking_policy) + + #################################################################### + + g.start_group("Zvknh - NIST Suite: Vector SHA-2 Secure Hash") + + g.function_group( + vector_crypto_template, + "Vector SHA-2 message schedule", + "", # FIXME: We probably have a separate document for vector-crypto + ["vsha2ms"], + UITYPE, + [32, 64], + LMULS, + decorators.has_no_masking_policy) + + g.function_group( + vector_crypto_template, + "Vector SHA-2 two rounds of compression", + "", # FIXME: We probably have a separate document for vector-crypto + ["vsha2ch", "vsha2cl"], + UITYPE, + [32, 64], + LMULS, + decorators.has_no_masking_policy) + + #################################################################### + + g.start_group("Zvksed - ShangMi Suite: SM4 Block Cipher") + + g.function_group( + vector_crypto_template, + "Vector SM4 KeyExpansion", + "", # FIXME: We probably have a separate document for vector-crypto + ["vsm4k"], + UITYPE, + [32], + LMULS, + decorators.has_no_masking_policy) + + g.function_group( + vector_crypto_template, + "Vector SM4 Rounds", + "", # FIXME: We probably have a separate document for vector-crypto + ["vsm4r"], + UITYPE, + [32], + LMULS, + decorators.has_no_masking_policy) + + #################################################################### + + g.start_group("Zvksh - ShangMi Suite: SM3 Secure Hash") + + g.function_group( + vector_crypto_template, + "Vector SM3 Message Expansion", + "", # FIXME: We probably have a separate document for vector-crypto + ["vsm3me"], + UITYPE, + [32], + LMULS, + decorators.has_no_masking_policy) + + g.function_group( + vector_crypto_template, + "Vector SM3 Compression", + "", # FIXME: We probably have a separate document for vector-crypto + ["vsm3c"], + UITYPE, + [32], + LMULS, + decorators.has_no_masking_policy) + + +#################################################################### diff --git a/vector_crypto_notes.adoc b/vector_crypto_notes.adoc new file mode 100644 index 000000000..e9c60396e --- /dev/null +++ b/vector_crypto_notes.adoc @@ -0,0 +1,15 @@ += Note for vector crypto intrinsics + +== Availability of vector crypto intrinsics + +Availability of the vector crypto intrinsics depends on the minimum vector length specified for the architecture via the `Zvl*b` ^0^ sub-extensions. The vector length must be at least one EGW (element group width ^1^) long. + +Take the intrinsic below for `vaesdf.vs` as an example. Since the instruction computes on a single element group provided through `vs2`, the `vuint32mf2_t` operand must be at least 128 bits long, which means VLEN must be at least 256. Therefore the intrinsic requires `zvl256b` to be available. + +``` +vuint32m4_t __riscv_vaesdf_vs_u32mf2_u32m4 (vuint32m4_t vd, vuint32mf2_t vs2, size_t vl); +``` + +^0^ https://github.com/riscv/riscv-v-spec/blob/master/v-spec.adoc#181-zvl-minimum-vector-length-standard-extensions[v-spec 18.1. Zvl*: Minimum Vector Length Standard Extensions] + +^1^ https://github.com/riscv/riscv-crypto/blob/master/doc/vector/riscv-crypto-vector-element-groups.adoc[Vector Crypto Specification: Element Groups]
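+
+For illustration, a minimal usage sketch of the intrinsic above is shown below. It assumes the intrinsics are exposed through `<riscv_vector.h>` and that the target provides `zvkned` together with `zvl256b`; the helper name and the surrounding RVV `vsetvl`/load/store intrinsics are illustrative choices, not part of this note.
+
+```
+#include <stdint.h>
+#include <riscv_vector.h>
+
+// Apply the final AES-128 decryption round to four 128-bit states held in a
+// vuint32m4_t register group, broadcasting one round-key element group (vs2).
+void aes128_dec_final_round(uint32_t dst[16], const uint32_t state[16],
+                            const uint32_t round_key[4]) {
+  size_t vl = __riscv_vsetvl_e32m4(16);                    // 4 states x 4 words
+  vuint32m4_t vd = __riscv_vle32_v_u32m4(state, vl);
+  vuint32mf2_t vs2 = __riscv_vle32_v_u32mf2(round_key, 4); // one element group
+  vd = __riscv_vaesdf_vs_u32mf2_u32m4(vd, vs2, vl);
+  __riscv_vse32_v_u32m4(dst, vd, vl);
+}
+```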