From 3374d743f2852eb0f1c32b9e0ace82f102f2752d Mon Sep 17 00:00:00 2001 From: Brandon Wu Date: Mon, 3 Jun 2024 22:56:20 -0700 Subject: [PATCH] Add missing vcpop, vclz, vctz in api tests --- .../vector-crypto/api-testing/vcpop.c | 178 +++++++ .../vector-crypto/intrinsic_funcs.md | 52 +++ ...bit-manipulation_used_in_cryptography.adoc | 52 +++ .../vector-crypto/llvm-api-tests/vcpop.c | 182 ++++++++ .../llvm-overloaded-tests/vcpop.c | 182 ++++++++ .../overloaded-api-testing/vcpop.c | 178 +++++++ .../overloaded_intrinsic_funcs.md | 52 +++ ...bit-manipulation_used_in_cryptography.adoc | 52 +++ .../policy_funcs/api-testing/vclz.c | 354 ++++++++++++++ .../policy_funcs/api-testing/vcpop.c | 354 ++++++++++++++ .../policy_funcs/api-testing/vctz.c | 354 ++++++++++++++ .../policy_funcs/intrinsic_funcs.md | 282 +++++++++++- ...bit-manipulation_used_in_cryptography.adoc | 282 +++++++++++- .../policy_funcs/llvm-api-tests/vclz.c | 365 +++++++++++++++ .../policy_funcs/llvm-api-tests/vcpop.c | 358 +++++++++++++++ .../policy_funcs/llvm-api-tests/vctz.c | 365 +++++++++++++++ .../policy_funcs/llvm-overloaded-tests/vclz.c | 434 ++++++++++++++++++ .../llvm-overloaded-tests/vcpop.c | 427 +++++++++++++++++ .../policy_funcs/llvm-overloaded-tests/vctz.c | 434 ++++++++++++++++++ .../overloaded-api-testing/vclz.c | 423 +++++++++++++++++ .../overloaded-api-testing/vcpop.c | 423 +++++++++++++++++ .../overloaded-api-testing/vctz.c | 423 +++++++++++++++++ .../overloaded_intrinsic_funcs.md | 282 +++++++++++- ...bit-manipulation_used_in_cryptography.adoc | 282 +++++++++++- .../rvv_intrinsic_gen/vector_crypto_inst.py | 12 +- 25 files changed, 6777 insertions(+), 5 deletions(-) create mode 100644 auto-generated/vector-crypto/api-testing/vcpop.c create mode 100644 auto-generated/vector-crypto/llvm-api-tests/vcpop.c create mode 100644 auto-generated/vector-crypto/llvm-overloaded-tests/vcpop.c create mode 100644 auto-generated/vector-crypto/overloaded-api-testing/vcpop.c create mode 100644 auto-generated/vector-crypto/policy_funcs/api-testing/vclz.c create mode 100644 auto-generated/vector-crypto/policy_funcs/api-testing/vcpop.c create mode 100644 auto-generated/vector-crypto/policy_funcs/api-testing/vctz.c create mode 100644 auto-generated/vector-crypto/policy_funcs/llvm-api-tests/vclz.c create mode 100644 auto-generated/vector-crypto/policy_funcs/llvm-api-tests/vcpop.c create mode 100644 auto-generated/vector-crypto/policy_funcs/llvm-api-tests/vctz.c create mode 100644 auto-generated/vector-crypto/policy_funcs/llvm-overloaded-tests/vclz.c create mode 100644 auto-generated/vector-crypto/policy_funcs/llvm-overloaded-tests/vcpop.c create mode 100644 auto-generated/vector-crypto/policy_funcs/llvm-overloaded-tests/vctz.c create mode 100644 auto-generated/vector-crypto/policy_funcs/overloaded-api-testing/vclz.c create mode 100644 auto-generated/vector-crypto/policy_funcs/overloaded-api-testing/vcpop.c create mode 100644 auto-generated/vector-crypto/policy_funcs/overloaded-api-testing/vctz.c diff --git a/auto-generated/vector-crypto/api-testing/vcpop.c b/auto-generated/vector-crypto/api-testing/vcpop.c new file mode 100644 index 000000000..d3c52d8fd --- /dev/null +++ b/auto-generated/vector-crypto/api-testing/vcpop.c @@ -0,0 +1,178 @@ +#include +#include + +vuint8mf8_t test_vcpop_v_u8mf8(vuint8mf8_t vs2, size_t vl) { + return __riscv_vcpop_v_u8mf8(vs2, vl); +} + +vuint8mf4_t test_vcpop_v_u8mf4(vuint8mf4_t vs2, size_t vl) { + return __riscv_vcpop_v_u8mf4(vs2, vl); +} + +vuint8mf2_t test_vcpop_v_u8mf2(vuint8mf2_t vs2, size_t vl) { + 
return __riscv_vcpop_v_u8mf2(vs2, vl); +} + +vuint8m1_t test_vcpop_v_u8m1(vuint8m1_t vs2, size_t vl) { + return __riscv_vcpop_v_u8m1(vs2, vl); +} + +vuint8m2_t test_vcpop_v_u8m2(vuint8m2_t vs2, size_t vl) { + return __riscv_vcpop_v_u8m2(vs2, vl); +} + +vuint8m4_t test_vcpop_v_u8m4(vuint8m4_t vs2, size_t vl) { + return __riscv_vcpop_v_u8m4(vs2, vl); +} + +vuint8m8_t test_vcpop_v_u8m8(vuint8m8_t vs2, size_t vl) { + return __riscv_vcpop_v_u8m8(vs2, vl); +} + +vuint16mf4_t test_vcpop_v_u16mf4(vuint16mf4_t vs2, size_t vl) { + return __riscv_vcpop_v_u16mf4(vs2, vl); +} + +vuint16mf2_t test_vcpop_v_u16mf2(vuint16mf2_t vs2, size_t vl) { + return __riscv_vcpop_v_u16mf2(vs2, vl); +} + +vuint16m1_t test_vcpop_v_u16m1(vuint16m1_t vs2, size_t vl) { + return __riscv_vcpop_v_u16m1(vs2, vl); +} + +vuint16m2_t test_vcpop_v_u16m2(vuint16m2_t vs2, size_t vl) { + return __riscv_vcpop_v_u16m2(vs2, vl); +} + +vuint16m4_t test_vcpop_v_u16m4(vuint16m4_t vs2, size_t vl) { + return __riscv_vcpop_v_u16m4(vs2, vl); +} + +vuint16m8_t test_vcpop_v_u16m8(vuint16m8_t vs2, size_t vl) { + return __riscv_vcpop_v_u16m8(vs2, vl); +} + +vuint32mf2_t test_vcpop_v_u32mf2(vuint32mf2_t vs2, size_t vl) { + return __riscv_vcpop_v_u32mf2(vs2, vl); +} + +vuint32m1_t test_vcpop_v_u32m1(vuint32m1_t vs2, size_t vl) { + return __riscv_vcpop_v_u32m1(vs2, vl); +} + +vuint32m2_t test_vcpop_v_u32m2(vuint32m2_t vs2, size_t vl) { + return __riscv_vcpop_v_u32m2(vs2, vl); +} + +vuint32m4_t test_vcpop_v_u32m4(vuint32m4_t vs2, size_t vl) { + return __riscv_vcpop_v_u32m4(vs2, vl); +} + +vuint32m8_t test_vcpop_v_u32m8(vuint32m8_t vs2, size_t vl) { + return __riscv_vcpop_v_u32m8(vs2, vl); +} + +vuint64m1_t test_vcpop_v_u64m1(vuint64m1_t vs2, size_t vl) { + return __riscv_vcpop_v_u64m1(vs2, vl); +} + +vuint64m2_t test_vcpop_v_u64m2(vuint64m2_t vs2, size_t vl) { + return __riscv_vcpop_v_u64m2(vs2, vl); +} + +vuint64m4_t test_vcpop_v_u64m4(vuint64m4_t vs2, size_t vl) { + return __riscv_vcpop_v_u64m4(vs2, vl); +} + +vuint64m8_t test_vcpop_v_u64m8(vuint64m8_t vs2, size_t vl) { + return __riscv_vcpop_v_u64m8(vs2, vl); +} + +vuint8mf8_t test_vcpop_v_u8mf8_m(vbool64_t vm, vuint8mf8_t vs2, size_t vl) { + return __riscv_vcpop_v_u8mf8_m(vm, vs2, vl); +} + +vuint8mf4_t test_vcpop_v_u8mf4_m(vbool32_t vm, vuint8mf4_t vs2, size_t vl) { + return __riscv_vcpop_v_u8mf4_m(vm, vs2, vl); +} + +vuint8mf2_t test_vcpop_v_u8mf2_m(vbool16_t vm, vuint8mf2_t vs2, size_t vl) { + return __riscv_vcpop_v_u8mf2_m(vm, vs2, vl); +} + +vuint8m1_t test_vcpop_v_u8m1_m(vbool8_t vm, vuint8m1_t vs2, size_t vl) { + return __riscv_vcpop_v_u8m1_m(vm, vs2, vl); +} + +vuint8m2_t test_vcpop_v_u8m2_m(vbool4_t vm, vuint8m2_t vs2, size_t vl) { + return __riscv_vcpop_v_u8m2_m(vm, vs2, vl); +} + +vuint8m4_t test_vcpop_v_u8m4_m(vbool2_t vm, vuint8m4_t vs2, size_t vl) { + return __riscv_vcpop_v_u8m4_m(vm, vs2, vl); +} + +vuint8m8_t test_vcpop_v_u8m8_m(vbool1_t vm, vuint8m8_t vs2, size_t vl) { + return __riscv_vcpop_v_u8m8_m(vm, vs2, vl); +} + +vuint16mf4_t test_vcpop_v_u16mf4_m(vbool64_t vm, vuint16mf4_t vs2, size_t vl) { + return __riscv_vcpop_v_u16mf4_m(vm, vs2, vl); +} + +vuint16mf2_t test_vcpop_v_u16mf2_m(vbool32_t vm, vuint16mf2_t vs2, size_t vl) { + return __riscv_vcpop_v_u16mf2_m(vm, vs2, vl); +} + +vuint16m1_t test_vcpop_v_u16m1_m(vbool16_t vm, vuint16m1_t vs2, size_t vl) { + return __riscv_vcpop_v_u16m1_m(vm, vs2, vl); +} + +vuint16m2_t test_vcpop_v_u16m2_m(vbool8_t vm, vuint16m2_t vs2, size_t vl) { + return __riscv_vcpop_v_u16m2_m(vm, vs2, vl); +} + +vuint16m4_t test_vcpop_v_u16m4_m(vbool4_t 
vm, vuint16m4_t vs2, size_t vl) { + return __riscv_vcpop_v_u16m4_m(vm, vs2, vl); +} + +vuint16m8_t test_vcpop_v_u16m8_m(vbool2_t vm, vuint16m8_t vs2, size_t vl) { + return __riscv_vcpop_v_u16m8_m(vm, vs2, vl); +} + +vuint32mf2_t test_vcpop_v_u32mf2_m(vbool64_t vm, vuint32mf2_t vs2, size_t vl) { + return __riscv_vcpop_v_u32mf2_m(vm, vs2, vl); +} + +vuint32m1_t test_vcpop_v_u32m1_m(vbool32_t vm, vuint32m1_t vs2, size_t vl) { + return __riscv_vcpop_v_u32m1_m(vm, vs2, vl); +} + +vuint32m2_t test_vcpop_v_u32m2_m(vbool16_t vm, vuint32m2_t vs2, size_t vl) { + return __riscv_vcpop_v_u32m2_m(vm, vs2, vl); +} + +vuint32m4_t test_vcpop_v_u32m4_m(vbool8_t vm, vuint32m4_t vs2, size_t vl) { + return __riscv_vcpop_v_u32m4_m(vm, vs2, vl); +} + +vuint32m8_t test_vcpop_v_u32m8_m(vbool4_t vm, vuint32m8_t vs2, size_t vl) { + return __riscv_vcpop_v_u32m8_m(vm, vs2, vl); +} + +vuint64m1_t test_vcpop_v_u64m1_m(vbool64_t vm, vuint64m1_t vs2, size_t vl) { + return __riscv_vcpop_v_u64m1_m(vm, vs2, vl); +} + +vuint64m2_t test_vcpop_v_u64m2_m(vbool32_t vm, vuint64m2_t vs2, size_t vl) { + return __riscv_vcpop_v_u64m2_m(vm, vs2, vl); +} + +vuint64m4_t test_vcpop_v_u64m4_m(vbool16_t vm, vuint64m4_t vs2, size_t vl) { + return __riscv_vcpop_v_u64m4_m(vm, vs2, vl); +} + +vuint64m8_t test_vcpop_v_u64m8_m(vbool8_t vm, vuint64m8_t vs2, size_t vl) { + return __riscv_vcpop_v_u64m8_m(vm, vs2, vl); +} diff --git a/auto-generated/vector-crypto/intrinsic_funcs.md b/auto-generated/vector-crypto/intrinsic_funcs.md index 993aa0ff3..5e8c4df54 100644 --- a/auto-generated/vector-crypto/intrinsic_funcs.md +++ b/auto-generated/vector-crypto/intrinsic_funcs.md @@ -333,6 +333,58 @@ vuint64m4_t __riscv_vctz_v_u64m4_m (vbool16_t vm, vuint64m4_t vs2, size_t vl); vuint64m8_t __riscv_vctz_v_u64m8_m (vbool8_t vm, vuint64m8_t vs2, size_t vl); ---- +[[]] +==== Vector Basic Bit-manipulation - Vector Population Count + +[,c] +---- +vuint8mf8_t __riscv_vcpop_v_u8mf8 (vuint8mf8_t vs2, size_t vl); +vuint8mf4_t __riscv_vcpop_v_u8mf4 (vuint8mf4_t vs2, size_t vl); +vuint8mf2_t __riscv_vcpop_v_u8mf2 (vuint8mf2_t vs2, size_t vl); +vuint8m1_t __riscv_vcpop_v_u8m1 (vuint8m1_t vs2, size_t vl); +vuint8m2_t __riscv_vcpop_v_u8m2 (vuint8m2_t vs2, size_t vl); +vuint8m4_t __riscv_vcpop_v_u8m4 (vuint8m4_t vs2, size_t vl); +vuint8m8_t __riscv_vcpop_v_u8m8 (vuint8m8_t vs2, size_t vl); +vuint16mf4_t __riscv_vcpop_v_u16mf4 (vuint16mf4_t vs2, size_t vl); +vuint16mf2_t __riscv_vcpop_v_u16mf2 (vuint16mf2_t vs2, size_t vl); +vuint16m1_t __riscv_vcpop_v_u16m1 (vuint16m1_t vs2, size_t vl); +vuint16m2_t __riscv_vcpop_v_u16m2 (vuint16m2_t vs2, size_t vl); +vuint16m4_t __riscv_vcpop_v_u16m4 (vuint16m4_t vs2, size_t vl); +vuint16m8_t __riscv_vcpop_v_u16m8 (vuint16m8_t vs2, size_t vl); +vuint32mf2_t __riscv_vcpop_v_u32mf2 (vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vcpop_v_u32m1 (vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vcpop_v_u32m2 (vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vcpop_v_u32m4 (vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vcpop_v_u32m8 (vuint32m8_t vs2, size_t vl); +vuint64m1_t __riscv_vcpop_v_u64m1 (vuint64m1_t vs2, size_t vl); +vuint64m2_t __riscv_vcpop_v_u64m2 (vuint64m2_t vs2, size_t vl); +vuint64m4_t __riscv_vcpop_v_u64m4 (vuint64m4_t vs2, size_t vl); +vuint64m8_t __riscv_vcpop_v_u64m8 (vuint64m8_t vs2, size_t vl); +// masked functions +vuint8mf8_t __riscv_vcpop_v_u8mf8_m (vbool64_t vm, vuint8mf8_t vs2, size_t vl); +vuint8mf4_t __riscv_vcpop_v_u8mf4_m (vbool32_t vm, vuint8mf4_t vs2, size_t vl); +vuint8mf2_t __riscv_vcpop_v_u8mf2_m 
(vbool16_t vm, vuint8mf2_t vs2, size_t vl); +vuint8m1_t __riscv_vcpop_v_u8m1_m (vbool8_t vm, vuint8m1_t vs2, size_t vl); +vuint8m2_t __riscv_vcpop_v_u8m2_m (vbool4_t vm, vuint8m2_t vs2, size_t vl); +vuint8m4_t __riscv_vcpop_v_u8m4_m (vbool2_t vm, vuint8m4_t vs2, size_t vl); +vuint8m8_t __riscv_vcpop_v_u8m8_m (vbool1_t vm, vuint8m8_t vs2, size_t vl); +vuint16mf4_t __riscv_vcpop_v_u16mf4_m (vbool64_t vm, vuint16mf4_t vs2, size_t vl); +vuint16mf2_t __riscv_vcpop_v_u16mf2_m (vbool32_t vm, vuint16mf2_t vs2, size_t vl); +vuint16m1_t __riscv_vcpop_v_u16m1_m (vbool16_t vm, vuint16m1_t vs2, size_t vl); +vuint16m2_t __riscv_vcpop_v_u16m2_m (vbool8_t vm, vuint16m2_t vs2, size_t vl); +vuint16m4_t __riscv_vcpop_v_u16m4_m (vbool4_t vm, vuint16m4_t vs2, size_t vl); +vuint16m8_t __riscv_vcpop_v_u16m8_m (vbool2_t vm, vuint16m8_t vs2, size_t vl); +vuint32mf2_t __riscv_vcpop_v_u32mf2_m (vbool64_t vm, vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vcpop_v_u32m1_m (vbool32_t vm, vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vcpop_v_u32m2_m (vbool16_t vm, vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vcpop_v_u32m4_m (vbool8_t vm, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vcpop_v_u32m8_m (vbool4_t vm, vuint32m8_t vs2, size_t vl); +vuint64m1_t __riscv_vcpop_v_u64m1_m (vbool64_t vm, vuint64m1_t vs2, size_t vl); +vuint64m2_t __riscv_vcpop_v_u64m2_m (vbool32_t vm, vuint64m2_t vs2, size_t vl); +vuint64m4_t __riscv_vcpop_v_u64m4_m (vbool16_t vm, vuint64m4_t vs2, size_t vl); +vuint64m8_t __riscv_vcpop_v_u64m8_m (vbool8_t vm, vuint64m8_t vs2, size_t vl); +---- + [[]] ==== Vector Bit-manipulation used in Cryptography - Rotate diff --git a/auto-generated/vector-crypto/intrinsic_funcs/00_zvbb_-_vector_bit-manipulation_used_in_cryptography.adoc b/auto-generated/vector-crypto/intrinsic_funcs/00_zvbb_-_vector_bit-manipulation_used_in_cryptography.adoc index e5ac8dce7..be1bbf32e 100644 --- a/auto-generated/vector-crypto/intrinsic_funcs/00_zvbb_-_vector_bit-manipulation_used_in_cryptography.adoc +++ b/auto-generated/vector-crypto/intrinsic_funcs/00_zvbb_-_vector_bit-manipulation_used_in_cryptography.adoc @@ -333,6 +333,58 @@ vuint64m4_t __riscv_vctz_v_u64m4_m (vbool16_t vm, vuint64m4_t vs2, size_t vl); vuint64m8_t __riscv_vctz_v_u64m8_m (vbool8_t vm, vuint64m8_t vs2, size_t vl); ---- +[[]] +==== Vector Basic Bit-manipulation - Vector Population Count + +[,c] +---- +vuint8mf8_t __riscv_vcpop_v_u8mf8 (vuint8mf8_t vs2, size_t vl); +vuint8mf4_t __riscv_vcpop_v_u8mf4 (vuint8mf4_t vs2, size_t vl); +vuint8mf2_t __riscv_vcpop_v_u8mf2 (vuint8mf2_t vs2, size_t vl); +vuint8m1_t __riscv_vcpop_v_u8m1 (vuint8m1_t vs2, size_t vl); +vuint8m2_t __riscv_vcpop_v_u8m2 (vuint8m2_t vs2, size_t vl); +vuint8m4_t __riscv_vcpop_v_u8m4 (vuint8m4_t vs2, size_t vl); +vuint8m8_t __riscv_vcpop_v_u8m8 (vuint8m8_t vs2, size_t vl); +vuint16mf4_t __riscv_vcpop_v_u16mf4 (vuint16mf4_t vs2, size_t vl); +vuint16mf2_t __riscv_vcpop_v_u16mf2 (vuint16mf2_t vs2, size_t vl); +vuint16m1_t __riscv_vcpop_v_u16m1 (vuint16m1_t vs2, size_t vl); +vuint16m2_t __riscv_vcpop_v_u16m2 (vuint16m2_t vs2, size_t vl); +vuint16m4_t __riscv_vcpop_v_u16m4 (vuint16m4_t vs2, size_t vl); +vuint16m8_t __riscv_vcpop_v_u16m8 (vuint16m8_t vs2, size_t vl); +vuint32mf2_t __riscv_vcpop_v_u32mf2 (vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vcpop_v_u32m1 (vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vcpop_v_u32m2 (vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vcpop_v_u32m4 (vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vcpop_v_u32m8 (vuint32m8_t vs2, size_t vl); 
+vuint64m1_t __riscv_vcpop_v_u64m1 (vuint64m1_t vs2, size_t vl); +vuint64m2_t __riscv_vcpop_v_u64m2 (vuint64m2_t vs2, size_t vl); +vuint64m4_t __riscv_vcpop_v_u64m4 (vuint64m4_t vs2, size_t vl); +vuint64m8_t __riscv_vcpop_v_u64m8 (vuint64m8_t vs2, size_t vl); +// masked functions +vuint8mf8_t __riscv_vcpop_v_u8mf8_m (vbool64_t vm, vuint8mf8_t vs2, size_t vl); +vuint8mf4_t __riscv_vcpop_v_u8mf4_m (vbool32_t vm, vuint8mf4_t vs2, size_t vl); +vuint8mf2_t __riscv_vcpop_v_u8mf2_m (vbool16_t vm, vuint8mf2_t vs2, size_t vl); +vuint8m1_t __riscv_vcpop_v_u8m1_m (vbool8_t vm, vuint8m1_t vs2, size_t vl); +vuint8m2_t __riscv_vcpop_v_u8m2_m (vbool4_t vm, vuint8m2_t vs2, size_t vl); +vuint8m4_t __riscv_vcpop_v_u8m4_m (vbool2_t vm, vuint8m4_t vs2, size_t vl); +vuint8m8_t __riscv_vcpop_v_u8m8_m (vbool1_t vm, vuint8m8_t vs2, size_t vl); +vuint16mf4_t __riscv_vcpop_v_u16mf4_m (vbool64_t vm, vuint16mf4_t vs2, size_t vl); +vuint16mf2_t __riscv_vcpop_v_u16mf2_m (vbool32_t vm, vuint16mf2_t vs2, size_t vl); +vuint16m1_t __riscv_vcpop_v_u16m1_m (vbool16_t vm, vuint16m1_t vs2, size_t vl); +vuint16m2_t __riscv_vcpop_v_u16m2_m (vbool8_t vm, vuint16m2_t vs2, size_t vl); +vuint16m4_t __riscv_vcpop_v_u16m4_m (vbool4_t vm, vuint16m4_t vs2, size_t vl); +vuint16m8_t __riscv_vcpop_v_u16m8_m (vbool2_t vm, vuint16m8_t vs2, size_t vl); +vuint32mf2_t __riscv_vcpop_v_u32mf2_m (vbool64_t vm, vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vcpop_v_u32m1_m (vbool32_t vm, vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vcpop_v_u32m2_m (vbool16_t vm, vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vcpop_v_u32m4_m (vbool8_t vm, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vcpop_v_u32m8_m (vbool4_t vm, vuint32m8_t vs2, size_t vl); +vuint64m1_t __riscv_vcpop_v_u64m1_m (vbool64_t vm, vuint64m1_t vs2, size_t vl); +vuint64m2_t __riscv_vcpop_v_u64m2_m (vbool32_t vm, vuint64m2_t vs2, size_t vl); +vuint64m4_t __riscv_vcpop_v_u64m4_m (vbool16_t vm, vuint64m4_t vs2, size_t vl); +vuint64m8_t __riscv_vcpop_v_u64m8_m (vbool8_t vm, vuint64m8_t vs2, size_t vl); +---- + [[]] ==== Vector Bit-manipulation used in Cryptography - Rotate diff --git a/auto-generated/vector-crypto/llvm-api-tests/vcpop.c b/auto-generated/vector-crypto/llvm-api-tests/vcpop.c new file mode 100644 index 000000000..1061c2222 --- /dev/null +++ b/auto-generated/vector-crypto/llvm-api-tests/vcpop.c @@ -0,0 +1,182 @@ +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include + +vuint8mf8_t test_vcpop_v_u8mf8(vuint8mf8_t vs2, size_t vl) { + return __riscv_vcpop_v_u8mf8(vs2, vl); +} + +vuint8mf4_t test_vcpop_v_u8mf4(vuint8mf4_t vs2, size_t vl) { + return __riscv_vcpop_v_u8mf4(vs2, vl); +} + +vuint8mf2_t test_vcpop_v_u8mf2(vuint8mf2_t vs2, size_t vl) { + return __riscv_vcpop_v_u8mf2(vs2, vl); +} + +vuint8m1_t test_vcpop_v_u8m1(vuint8m1_t vs2, size_t vl) { + return __riscv_vcpop_v_u8m1(vs2, vl); +} + +vuint8m2_t test_vcpop_v_u8m2(vuint8m2_t vs2, size_t vl) { + return __riscv_vcpop_v_u8m2(vs2, vl); +} + +vuint8m4_t test_vcpop_v_u8m4(vuint8m4_t vs2, size_t vl) { + return __riscv_vcpop_v_u8m4(vs2, vl); +} + +vuint8m8_t test_vcpop_v_u8m8(vuint8m8_t vs2, size_t vl) { + return __riscv_vcpop_v_u8m8(vs2, vl); +} + +vuint16mf4_t test_vcpop_v_u16mf4(vuint16mf4_t vs2, size_t vl) { + return __riscv_vcpop_v_u16mf4(vs2, vl); +} + +vuint16mf2_t test_vcpop_v_u16mf2(vuint16mf2_t vs2, size_t vl) { + return 
__riscv_vcpop_v_u16mf2(vs2, vl); +} + +vuint16m1_t test_vcpop_v_u16m1(vuint16m1_t vs2, size_t vl) { + return __riscv_vcpop_v_u16m1(vs2, vl); +} + +vuint16m2_t test_vcpop_v_u16m2(vuint16m2_t vs2, size_t vl) { + return __riscv_vcpop_v_u16m2(vs2, vl); +} + +vuint16m4_t test_vcpop_v_u16m4(vuint16m4_t vs2, size_t vl) { + return __riscv_vcpop_v_u16m4(vs2, vl); +} + +vuint16m8_t test_vcpop_v_u16m8(vuint16m8_t vs2, size_t vl) { + return __riscv_vcpop_v_u16m8(vs2, vl); +} + +vuint32mf2_t test_vcpop_v_u32mf2(vuint32mf2_t vs2, size_t vl) { + return __riscv_vcpop_v_u32mf2(vs2, vl); +} + +vuint32m1_t test_vcpop_v_u32m1(vuint32m1_t vs2, size_t vl) { + return __riscv_vcpop_v_u32m1(vs2, vl); +} + +vuint32m2_t test_vcpop_v_u32m2(vuint32m2_t vs2, size_t vl) { + return __riscv_vcpop_v_u32m2(vs2, vl); +} + +vuint32m4_t test_vcpop_v_u32m4(vuint32m4_t vs2, size_t vl) { + return __riscv_vcpop_v_u32m4(vs2, vl); +} + +vuint32m8_t test_vcpop_v_u32m8(vuint32m8_t vs2, size_t vl) { + return __riscv_vcpop_v_u32m8(vs2, vl); +} + +vuint64m1_t test_vcpop_v_u64m1(vuint64m1_t vs2, size_t vl) { + return __riscv_vcpop_v_u64m1(vs2, vl); +} + +vuint64m2_t test_vcpop_v_u64m2(vuint64m2_t vs2, size_t vl) { + return __riscv_vcpop_v_u64m2(vs2, vl); +} + +vuint64m4_t test_vcpop_v_u64m4(vuint64m4_t vs2, size_t vl) { + return __riscv_vcpop_v_u64m4(vs2, vl); +} + +vuint64m8_t test_vcpop_v_u64m8(vuint64m8_t vs2, size_t vl) { + return __riscv_vcpop_v_u64m8(vs2, vl); +} + +vuint8mf8_t test_vcpop_v_u8mf8_m(vbool64_t vm, vuint8mf8_t vs2, size_t vl) { + return __riscv_vcpop_v_u8mf8_m(vm, vs2, vl); +} + +vuint8mf4_t test_vcpop_v_u8mf4_m(vbool32_t vm, vuint8mf4_t vs2, size_t vl) { + return __riscv_vcpop_v_u8mf4_m(vm, vs2, vl); +} + +vuint8mf2_t test_vcpop_v_u8mf2_m(vbool16_t vm, vuint8mf2_t vs2, size_t vl) { + return __riscv_vcpop_v_u8mf2_m(vm, vs2, vl); +} + +vuint8m1_t test_vcpop_v_u8m1_m(vbool8_t vm, vuint8m1_t vs2, size_t vl) { + return __riscv_vcpop_v_u8m1_m(vm, vs2, vl); +} + +vuint8m2_t test_vcpop_v_u8m2_m(vbool4_t vm, vuint8m2_t vs2, size_t vl) { + return __riscv_vcpop_v_u8m2_m(vm, vs2, vl); +} + +vuint8m4_t test_vcpop_v_u8m4_m(vbool2_t vm, vuint8m4_t vs2, size_t vl) { + return __riscv_vcpop_v_u8m4_m(vm, vs2, vl); +} + +vuint8m8_t test_vcpop_v_u8m8_m(vbool1_t vm, vuint8m8_t vs2, size_t vl) { + return __riscv_vcpop_v_u8m8_m(vm, vs2, vl); +} + +vuint16mf4_t test_vcpop_v_u16mf4_m(vbool64_t vm, vuint16mf4_t vs2, size_t vl) { + return __riscv_vcpop_v_u16mf4_m(vm, vs2, vl); +} + +vuint16mf2_t test_vcpop_v_u16mf2_m(vbool32_t vm, vuint16mf2_t vs2, size_t vl) { + return __riscv_vcpop_v_u16mf2_m(vm, vs2, vl); +} + +vuint16m1_t test_vcpop_v_u16m1_m(vbool16_t vm, vuint16m1_t vs2, size_t vl) { + return __riscv_vcpop_v_u16m1_m(vm, vs2, vl); +} + +vuint16m2_t test_vcpop_v_u16m2_m(vbool8_t vm, vuint16m2_t vs2, size_t vl) { + return __riscv_vcpop_v_u16m2_m(vm, vs2, vl); +} + +vuint16m4_t test_vcpop_v_u16m4_m(vbool4_t vm, vuint16m4_t vs2, size_t vl) { + return __riscv_vcpop_v_u16m4_m(vm, vs2, vl); +} + +vuint16m8_t test_vcpop_v_u16m8_m(vbool2_t vm, vuint16m8_t vs2, size_t vl) { + return __riscv_vcpop_v_u16m8_m(vm, vs2, vl); +} + +vuint32mf2_t test_vcpop_v_u32mf2_m(vbool64_t vm, vuint32mf2_t vs2, size_t vl) { + return __riscv_vcpop_v_u32mf2_m(vm, vs2, vl); +} + +vuint32m1_t test_vcpop_v_u32m1_m(vbool32_t vm, vuint32m1_t vs2, size_t vl) { + return __riscv_vcpop_v_u32m1_m(vm, vs2, vl); +} + +vuint32m2_t test_vcpop_v_u32m2_m(vbool16_t vm, vuint32m2_t vs2, size_t vl) { + return __riscv_vcpop_v_u32m2_m(vm, vs2, vl); +} + +vuint32m4_t 
test_vcpop_v_u32m4_m(vbool8_t vm, vuint32m4_t vs2, size_t vl) { + return __riscv_vcpop_v_u32m4_m(vm, vs2, vl); +} + +vuint32m8_t test_vcpop_v_u32m8_m(vbool4_t vm, vuint32m8_t vs2, size_t vl) { + return __riscv_vcpop_v_u32m8_m(vm, vs2, vl); +} + +vuint64m1_t test_vcpop_v_u64m1_m(vbool64_t vm, vuint64m1_t vs2, size_t vl) { + return __riscv_vcpop_v_u64m1_m(vm, vs2, vl); +} + +vuint64m2_t test_vcpop_v_u64m2_m(vbool32_t vm, vuint64m2_t vs2, size_t vl) { + return __riscv_vcpop_v_u64m2_m(vm, vs2, vl); +} + +vuint64m4_t test_vcpop_v_u64m4_m(vbool16_t vm, vuint64m4_t vs2, size_t vl) { + return __riscv_vcpop_v_u64m4_m(vm, vs2, vl); +} + +vuint64m8_t test_vcpop_v_u64m8_m(vbool8_t vm, vuint64m8_t vs2, size_t vl) { + return __riscv_vcpop_v_u64m8_m(vm, vs2, vl); +} diff --git a/auto-generated/vector-crypto/llvm-overloaded-tests/vcpop.c b/auto-generated/vector-crypto/llvm-overloaded-tests/vcpop.c new file mode 100644 index 000000000..aee4aff80 --- /dev/null +++ b/auto-generated/vector-crypto/llvm-overloaded-tests/vcpop.c @@ -0,0 +1,182 @@ +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include + +vuint8mf8_t test_vcpop_v_u8mf8(vuint8mf8_t vs2, size_t vl) { + return __riscv_vcpop(vs2, vl); +} + +vuint8mf4_t test_vcpop_v_u8mf4(vuint8mf4_t vs2, size_t vl) { + return __riscv_vcpop(vs2, vl); +} + +vuint8mf2_t test_vcpop_v_u8mf2(vuint8mf2_t vs2, size_t vl) { + return __riscv_vcpop(vs2, vl); +} + +vuint8m1_t test_vcpop_v_u8m1(vuint8m1_t vs2, size_t vl) { + return __riscv_vcpop(vs2, vl); +} + +vuint8m2_t test_vcpop_v_u8m2(vuint8m2_t vs2, size_t vl) { + return __riscv_vcpop(vs2, vl); +} + +vuint8m4_t test_vcpop_v_u8m4(vuint8m4_t vs2, size_t vl) { + return __riscv_vcpop(vs2, vl); +} + +vuint8m8_t test_vcpop_v_u8m8(vuint8m8_t vs2, size_t vl) { + return __riscv_vcpop(vs2, vl); +} + +vuint16mf4_t test_vcpop_v_u16mf4(vuint16mf4_t vs2, size_t vl) { + return __riscv_vcpop(vs2, vl); +} + +vuint16mf2_t test_vcpop_v_u16mf2(vuint16mf2_t vs2, size_t vl) { + return __riscv_vcpop(vs2, vl); +} + +vuint16m1_t test_vcpop_v_u16m1(vuint16m1_t vs2, size_t vl) { + return __riscv_vcpop(vs2, vl); +} + +vuint16m2_t test_vcpop_v_u16m2(vuint16m2_t vs2, size_t vl) { + return __riscv_vcpop(vs2, vl); +} + +vuint16m4_t test_vcpop_v_u16m4(vuint16m4_t vs2, size_t vl) { + return __riscv_vcpop(vs2, vl); +} + +vuint16m8_t test_vcpop_v_u16m8(vuint16m8_t vs2, size_t vl) { + return __riscv_vcpop(vs2, vl); +} + +vuint32mf2_t test_vcpop_v_u32mf2(vuint32mf2_t vs2, size_t vl) { + return __riscv_vcpop(vs2, vl); +} + +vuint32m1_t test_vcpop_v_u32m1(vuint32m1_t vs2, size_t vl) { + return __riscv_vcpop(vs2, vl); +} + +vuint32m2_t test_vcpop_v_u32m2(vuint32m2_t vs2, size_t vl) { + return __riscv_vcpop(vs2, vl); +} + +vuint32m4_t test_vcpop_v_u32m4(vuint32m4_t vs2, size_t vl) { + return __riscv_vcpop(vs2, vl); +} + +vuint32m8_t test_vcpop_v_u32m8(vuint32m8_t vs2, size_t vl) { + return __riscv_vcpop(vs2, vl); +} + +vuint64m1_t test_vcpop_v_u64m1(vuint64m1_t vs2, size_t vl) { + return __riscv_vcpop(vs2, vl); +} + +vuint64m2_t test_vcpop_v_u64m2(vuint64m2_t vs2, size_t vl) { + return __riscv_vcpop(vs2, vl); +} + +vuint64m4_t test_vcpop_v_u64m4(vuint64m4_t vs2, size_t vl) { + return __riscv_vcpop(vs2, vl); +} + +vuint64m8_t test_vcpop_v_u64m8(vuint64m8_t vs2, size_t vl) { + return __riscv_vcpop(vs2, vl); +} + +vuint8mf8_t test_vcpop_v_u8mf8_m(vbool64_t vm, vuint8mf8_t vs2, size_t vl) { + 
return __riscv_vcpop(vm, vs2, vl); +} + +vuint8mf4_t test_vcpop_v_u8mf4_m(vbool32_t vm, vuint8mf4_t vs2, size_t vl) { + return __riscv_vcpop(vm, vs2, vl); +} + +vuint8mf2_t test_vcpop_v_u8mf2_m(vbool16_t vm, vuint8mf2_t vs2, size_t vl) { + return __riscv_vcpop(vm, vs2, vl); +} + +vuint8m1_t test_vcpop_v_u8m1_m(vbool8_t vm, vuint8m1_t vs2, size_t vl) { + return __riscv_vcpop(vm, vs2, vl); +} + +vuint8m2_t test_vcpop_v_u8m2_m(vbool4_t vm, vuint8m2_t vs2, size_t vl) { + return __riscv_vcpop(vm, vs2, vl); +} + +vuint8m4_t test_vcpop_v_u8m4_m(vbool2_t vm, vuint8m4_t vs2, size_t vl) { + return __riscv_vcpop(vm, vs2, vl); +} + +vuint8m8_t test_vcpop_v_u8m8_m(vbool1_t vm, vuint8m8_t vs2, size_t vl) { + return __riscv_vcpop(vm, vs2, vl); +} + +vuint16mf4_t test_vcpop_v_u16mf4_m(vbool64_t vm, vuint16mf4_t vs2, size_t vl) { + return __riscv_vcpop(vm, vs2, vl); +} + +vuint16mf2_t test_vcpop_v_u16mf2_m(vbool32_t vm, vuint16mf2_t vs2, size_t vl) { + return __riscv_vcpop(vm, vs2, vl); +} + +vuint16m1_t test_vcpop_v_u16m1_m(vbool16_t vm, vuint16m1_t vs2, size_t vl) { + return __riscv_vcpop(vm, vs2, vl); +} + +vuint16m2_t test_vcpop_v_u16m2_m(vbool8_t vm, vuint16m2_t vs2, size_t vl) { + return __riscv_vcpop(vm, vs2, vl); +} + +vuint16m4_t test_vcpop_v_u16m4_m(vbool4_t vm, vuint16m4_t vs2, size_t vl) { + return __riscv_vcpop(vm, vs2, vl); +} + +vuint16m8_t test_vcpop_v_u16m8_m(vbool2_t vm, vuint16m8_t vs2, size_t vl) { + return __riscv_vcpop(vm, vs2, vl); +} + +vuint32mf2_t test_vcpop_v_u32mf2_m(vbool64_t vm, vuint32mf2_t vs2, size_t vl) { + return __riscv_vcpop(vm, vs2, vl); +} + +vuint32m1_t test_vcpop_v_u32m1_m(vbool32_t vm, vuint32m1_t vs2, size_t vl) { + return __riscv_vcpop(vm, vs2, vl); +} + +vuint32m2_t test_vcpop_v_u32m2_m(vbool16_t vm, vuint32m2_t vs2, size_t vl) { + return __riscv_vcpop(vm, vs2, vl); +} + +vuint32m4_t test_vcpop_v_u32m4_m(vbool8_t vm, vuint32m4_t vs2, size_t vl) { + return __riscv_vcpop(vm, vs2, vl); +} + +vuint32m8_t test_vcpop_v_u32m8_m(vbool4_t vm, vuint32m8_t vs2, size_t vl) { + return __riscv_vcpop(vm, vs2, vl); +} + +vuint64m1_t test_vcpop_v_u64m1_m(vbool64_t vm, vuint64m1_t vs2, size_t vl) { + return __riscv_vcpop(vm, vs2, vl); +} + +vuint64m2_t test_vcpop_v_u64m2_m(vbool32_t vm, vuint64m2_t vs2, size_t vl) { + return __riscv_vcpop(vm, vs2, vl); +} + +vuint64m4_t test_vcpop_v_u64m4_m(vbool16_t vm, vuint64m4_t vs2, size_t vl) { + return __riscv_vcpop(vm, vs2, vl); +} + +vuint64m8_t test_vcpop_v_u64m8_m(vbool8_t vm, vuint64m8_t vs2, size_t vl) { + return __riscv_vcpop(vm, vs2, vl); +} diff --git a/auto-generated/vector-crypto/overloaded-api-testing/vcpop.c b/auto-generated/vector-crypto/overloaded-api-testing/vcpop.c new file mode 100644 index 000000000..cf5ec1edd --- /dev/null +++ b/auto-generated/vector-crypto/overloaded-api-testing/vcpop.c @@ -0,0 +1,178 @@ +#include +#include + +vuint8mf8_t test_vcpop_v_u8mf8(vuint8mf8_t vs2, size_t vl) { + return __riscv_vcpop(vs2, vl); +} + +vuint8mf4_t test_vcpop_v_u8mf4(vuint8mf4_t vs2, size_t vl) { + return __riscv_vcpop(vs2, vl); +} + +vuint8mf2_t test_vcpop_v_u8mf2(vuint8mf2_t vs2, size_t vl) { + return __riscv_vcpop(vs2, vl); +} + +vuint8m1_t test_vcpop_v_u8m1(vuint8m1_t vs2, size_t vl) { + return __riscv_vcpop(vs2, vl); +} + +vuint8m2_t test_vcpop_v_u8m2(vuint8m2_t vs2, size_t vl) { + return __riscv_vcpop(vs2, vl); +} + +vuint8m4_t test_vcpop_v_u8m4(vuint8m4_t vs2, size_t vl) { + return __riscv_vcpop(vs2, vl); +} + +vuint8m8_t test_vcpop_v_u8m8(vuint8m8_t vs2, size_t vl) { + return __riscv_vcpop(vs2, vl); +} + +vuint16mf4_t 
test_vcpop_v_u16mf4(vuint16mf4_t vs2, size_t vl) { + return __riscv_vcpop(vs2, vl); +} + +vuint16mf2_t test_vcpop_v_u16mf2(vuint16mf2_t vs2, size_t vl) { + return __riscv_vcpop(vs2, vl); +} + +vuint16m1_t test_vcpop_v_u16m1(vuint16m1_t vs2, size_t vl) { + return __riscv_vcpop(vs2, vl); +} + +vuint16m2_t test_vcpop_v_u16m2(vuint16m2_t vs2, size_t vl) { + return __riscv_vcpop(vs2, vl); +} + +vuint16m4_t test_vcpop_v_u16m4(vuint16m4_t vs2, size_t vl) { + return __riscv_vcpop(vs2, vl); +} + +vuint16m8_t test_vcpop_v_u16m8(vuint16m8_t vs2, size_t vl) { + return __riscv_vcpop(vs2, vl); +} + +vuint32mf2_t test_vcpop_v_u32mf2(vuint32mf2_t vs2, size_t vl) { + return __riscv_vcpop(vs2, vl); +} + +vuint32m1_t test_vcpop_v_u32m1(vuint32m1_t vs2, size_t vl) { + return __riscv_vcpop(vs2, vl); +} + +vuint32m2_t test_vcpop_v_u32m2(vuint32m2_t vs2, size_t vl) { + return __riscv_vcpop(vs2, vl); +} + +vuint32m4_t test_vcpop_v_u32m4(vuint32m4_t vs2, size_t vl) { + return __riscv_vcpop(vs2, vl); +} + +vuint32m8_t test_vcpop_v_u32m8(vuint32m8_t vs2, size_t vl) { + return __riscv_vcpop(vs2, vl); +} + +vuint64m1_t test_vcpop_v_u64m1(vuint64m1_t vs2, size_t vl) { + return __riscv_vcpop(vs2, vl); +} + +vuint64m2_t test_vcpop_v_u64m2(vuint64m2_t vs2, size_t vl) { + return __riscv_vcpop(vs2, vl); +} + +vuint64m4_t test_vcpop_v_u64m4(vuint64m4_t vs2, size_t vl) { + return __riscv_vcpop(vs2, vl); +} + +vuint64m8_t test_vcpop_v_u64m8(vuint64m8_t vs2, size_t vl) { + return __riscv_vcpop(vs2, vl); +} + +vuint8mf8_t test_vcpop_v_u8mf8_m(vbool64_t vm, vuint8mf8_t vs2, size_t vl) { + return __riscv_vcpop(vm, vs2, vl); +} + +vuint8mf4_t test_vcpop_v_u8mf4_m(vbool32_t vm, vuint8mf4_t vs2, size_t vl) { + return __riscv_vcpop(vm, vs2, vl); +} + +vuint8mf2_t test_vcpop_v_u8mf2_m(vbool16_t vm, vuint8mf2_t vs2, size_t vl) { + return __riscv_vcpop(vm, vs2, vl); +} + +vuint8m1_t test_vcpop_v_u8m1_m(vbool8_t vm, vuint8m1_t vs2, size_t vl) { + return __riscv_vcpop(vm, vs2, vl); +} + +vuint8m2_t test_vcpop_v_u8m2_m(vbool4_t vm, vuint8m2_t vs2, size_t vl) { + return __riscv_vcpop(vm, vs2, vl); +} + +vuint8m4_t test_vcpop_v_u8m4_m(vbool2_t vm, vuint8m4_t vs2, size_t vl) { + return __riscv_vcpop(vm, vs2, vl); +} + +vuint8m8_t test_vcpop_v_u8m8_m(vbool1_t vm, vuint8m8_t vs2, size_t vl) { + return __riscv_vcpop(vm, vs2, vl); +} + +vuint16mf4_t test_vcpop_v_u16mf4_m(vbool64_t vm, vuint16mf4_t vs2, size_t vl) { + return __riscv_vcpop(vm, vs2, vl); +} + +vuint16mf2_t test_vcpop_v_u16mf2_m(vbool32_t vm, vuint16mf2_t vs2, size_t vl) { + return __riscv_vcpop(vm, vs2, vl); +} + +vuint16m1_t test_vcpop_v_u16m1_m(vbool16_t vm, vuint16m1_t vs2, size_t vl) { + return __riscv_vcpop(vm, vs2, vl); +} + +vuint16m2_t test_vcpop_v_u16m2_m(vbool8_t vm, vuint16m2_t vs2, size_t vl) { + return __riscv_vcpop(vm, vs2, vl); +} + +vuint16m4_t test_vcpop_v_u16m4_m(vbool4_t vm, vuint16m4_t vs2, size_t vl) { + return __riscv_vcpop(vm, vs2, vl); +} + +vuint16m8_t test_vcpop_v_u16m8_m(vbool2_t vm, vuint16m8_t vs2, size_t vl) { + return __riscv_vcpop(vm, vs2, vl); +} + +vuint32mf2_t test_vcpop_v_u32mf2_m(vbool64_t vm, vuint32mf2_t vs2, size_t vl) { + return __riscv_vcpop(vm, vs2, vl); +} + +vuint32m1_t test_vcpop_v_u32m1_m(vbool32_t vm, vuint32m1_t vs2, size_t vl) { + return __riscv_vcpop(vm, vs2, vl); +} + +vuint32m2_t test_vcpop_v_u32m2_m(vbool16_t vm, vuint32m2_t vs2, size_t vl) { + return __riscv_vcpop(vm, vs2, vl); +} + +vuint32m4_t test_vcpop_v_u32m4_m(vbool8_t vm, vuint32m4_t vs2, size_t vl) { + return __riscv_vcpop(vm, vs2, vl); +} + +vuint32m8_t 
test_vcpop_v_u32m8_m(vbool4_t vm, vuint32m8_t vs2, size_t vl) { + return __riscv_vcpop(vm, vs2, vl); +} + +vuint64m1_t test_vcpop_v_u64m1_m(vbool64_t vm, vuint64m1_t vs2, size_t vl) { + return __riscv_vcpop(vm, vs2, vl); +} + +vuint64m2_t test_vcpop_v_u64m2_m(vbool32_t vm, vuint64m2_t vs2, size_t vl) { + return __riscv_vcpop(vm, vs2, vl); +} + +vuint64m4_t test_vcpop_v_u64m4_m(vbool16_t vm, vuint64m4_t vs2, size_t vl) { + return __riscv_vcpop(vm, vs2, vl); +} + +vuint64m8_t test_vcpop_v_u64m8_m(vbool8_t vm, vuint64m8_t vs2, size_t vl) { + return __riscv_vcpop(vm, vs2, vl); +} diff --git a/auto-generated/vector-crypto/overloaded_intrinsic_funcs.md b/auto-generated/vector-crypto/overloaded_intrinsic_funcs.md index 00ace0fa3..fe4429338 100644 --- a/auto-generated/vector-crypto/overloaded_intrinsic_funcs.md +++ b/auto-generated/vector-crypto/overloaded_intrinsic_funcs.md @@ -333,6 +333,58 @@ vuint64m4_t __riscv_vctz (vbool16_t vm, vuint64m4_t vs2, size_t vl); vuint64m8_t __riscv_vctz (vbool8_t vm, vuint64m8_t vs2, size_t vl); ---- +[[overloaded-]] +==== Vector Basic Bit-manipulation - Vector Population Count + +[,c] +---- +vuint8mf8_t __riscv_vcpop (vuint8mf8_t vs2, size_t vl); +vuint8mf4_t __riscv_vcpop (vuint8mf4_t vs2, size_t vl); +vuint8mf2_t __riscv_vcpop (vuint8mf2_t vs2, size_t vl); +vuint8m1_t __riscv_vcpop (vuint8m1_t vs2, size_t vl); +vuint8m2_t __riscv_vcpop (vuint8m2_t vs2, size_t vl); +vuint8m4_t __riscv_vcpop (vuint8m4_t vs2, size_t vl); +vuint8m8_t __riscv_vcpop (vuint8m8_t vs2, size_t vl); +vuint16mf4_t __riscv_vcpop (vuint16mf4_t vs2, size_t vl); +vuint16mf2_t __riscv_vcpop (vuint16mf2_t vs2, size_t vl); +vuint16m1_t __riscv_vcpop (vuint16m1_t vs2, size_t vl); +vuint16m2_t __riscv_vcpop (vuint16m2_t vs2, size_t vl); +vuint16m4_t __riscv_vcpop (vuint16m4_t vs2, size_t vl); +vuint16m8_t __riscv_vcpop (vuint16m8_t vs2, size_t vl); +vuint32mf2_t __riscv_vcpop (vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vcpop (vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vcpop (vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vcpop (vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vcpop (vuint32m8_t vs2, size_t vl); +vuint64m1_t __riscv_vcpop (vuint64m1_t vs2, size_t vl); +vuint64m2_t __riscv_vcpop (vuint64m2_t vs2, size_t vl); +vuint64m4_t __riscv_vcpop (vuint64m4_t vs2, size_t vl); +vuint64m8_t __riscv_vcpop (vuint64m8_t vs2, size_t vl); +// masked functions +vuint8mf8_t __riscv_vcpop (vbool64_t vm, vuint8mf8_t vs2, size_t vl); +vuint8mf4_t __riscv_vcpop (vbool32_t vm, vuint8mf4_t vs2, size_t vl); +vuint8mf2_t __riscv_vcpop (vbool16_t vm, vuint8mf2_t vs2, size_t vl); +vuint8m1_t __riscv_vcpop (vbool8_t vm, vuint8m1_t vs2, size_t vl); +vuint8m2_t __riscv_vcpop (vbool4_t vm, vuint8m2_t vs2, size_t vl); +vuint8m4_t __riscv_vcpop (vbool2_t vm, vuint8m4_t vs2, size_t vl); +vuint8m8_t __riscv_vcpop (vbool1_t vm, vuint8m8_t vs2, size_t vl); +vuint16mf4_t __riscv_vcpop (vbool64_t vm, vuint16mf4_t vs2, size_t vl); +vuint16mf2_t __riscv_vcpop (vbool32_t vm, vuint16mf2_t vs2, size_t vl); +vuint16m1_t __riscv_vcpop (vbool16_t vm, vuint16m1_t vs2, size_t vl); +vuint16m2_t __riscv_vcpop (vbool8_t vm, vuint16m2_t vs2, size_t vl); +vuint16m4_t __riscv_vcpop (vbool4_t vm, vuint16m4_t vs2, size_t vl); +vuint16m8_t __riscv_vcpop (vbool2_t vm, vuint16m8_t vs2, size_t vl); +vuint32mf2_t __riscv_vcpop (vbool64_t vm, vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vcpop (vbool32_t vm, vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vcpop (vbool16_t vm, vuint32m2_t vs2, size_t vl); +vuint32m4_t 
__riscv_vcpop (vbool8_t vm, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vcpop (vbool4_t vm, vuint32m8_t vs2, size_t vl); +vuint64m1_t __riscv_vcpop (vbool64_t vm, vuint64m1_t vs2, size_t vl); +vuint64m2_t __riscv_vcpop (vbool32_t vm, vuint64m2_t vs2, size_t vl); +vuint64m4_t __riscv_vcpop (vbool16_t vm, vuint64m4_t vs2, size_t vl); +vuint64m8_t __riscv_vcpop (vbool8_t vm, vuint64m8_t vs2, size_t vl); +---- + [[overloaded-]] ==== Vector Bit-manipulation used in Cryptography - Rotate diff --git a/auto-generated/vector-crypto/overloaded_intrinsic_funcs/00_zvbb_-_vector_bit-manipulation_used_in_cryptography.adoc b/auto-generated/vector-crypto/overloaded_intrinsic_funcs/00_zvbb_-_vector_bit-manipulation_used_in_cryptography.adoc index 44331cd43..c32b967ed 100644 --- a/auto-generated/vector-crypto/overloaded_intrinsic_funcs/00_zvbb_-_vector_bit-manipulation_used_in_cryptography.adoc +++ b/auto-generated/vector-crypto/overloaded_intrinsic_funcs/00_zvbb_-_vector_bit-manipulation_used_in_cryptography.adoc @@ -333,6 +333,58 @@ vuint64m4_t __riscv_vctz (vbool16_t vm, vuint64m4_t vs2, size_t vl); vuint64m8_t __riscv_vctz (vbool8_t vm, vuint64m8_t vs2, size_t vl); ---- +[[overloaded-]] +==== Vector Basic Bit-manipulation - Vector Population Count + +[,c] +---- +vuint8mf8_t __riscv_vcpop (vuint8mf8_t vs2, size_t vl); +vuint8mf4_t __riscv_vcpop (vuint8mf4_t vs2, size_t vl); +vuint8mf2_t __riscv_vcpop (vuint8mf2_t vs2, size_t vl); +vuint8m1_t __riscv_vcpop (vuint8m1_t vs2, size_t vl); +vuint8m2_t __riscv_vcpop (vuint8m2_t vs2, size_t vl); +vuint8m4_t __riscv_vcpop (vuint8m4_t vs2, size_t vl); +vuint8m8_t __riscv_vcpop (vuint8m8_t vs2, size_t vl); +vuint16mf4_t __riscv_vcpop (vuint16mf4_t vs2, size_t vl); +vuint16mf2_t __riscv_vcpop (vuint16mf2_t vs2, size_t vl); +vuint16m1_t __riscv_vcpop (vuint16m1_t vs2, size_t vl); +vuint16m2_t __riscv_vcpop (vuint16m2_t vs2, size_t vl); +vuint16m4_t __riscv_vcpop (vuint16m4_t vs2, size_t vl); +vuint16m8_t __riscv_vcpop (vuint16m8_t vs2, size_t vl); +vuint32mf2_t __riscv_vcpop (vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vcpop (vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vcpop (vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vcpop (vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vcpop (vuint32m8_t vs2, size_t vl); +vuint64m1_t __riscv_vcpop (vuint64m1_t vs2, size_t vl); +vuint64m2_t __riscv_vcpop (vuint64m2_t vs2, size_t vl); +vuint64m4_t __riscv_vcpop (vuint64m4_t vs2, size_t vl); +vuint64m8_t __riscv_vcpop (vuint64m8_t vs2, size_t vl); +// masked functions +vuint8mf8_t __riscv_vcpop (vbool64_t vm, vuint8mf8_t vs2, size_t vl); +vuint8mf4_t __riscv_vcpop (vbool32_t vm, vuint8mf4_t vs2, size_t vl); +vuint8mf2_t __riscv_vcpop (vbool16_t vm, vuint8mf2_t vs2, size_t vl); +vuint8m1_t __riscv_vcpop (vbool8_t vm, vuint8m1_t vs2, size_t vl); +vuint8m2_t __riscv_vcpop (vbool4_t vm, vuint8m2_t vs2, size_t vl); +vuint8m4_t __riscv_vcpop (vbool2_t vm, vuint8m4_t vs2, size_t vl); +vuint8m8_t __riscv_vcpop (vbool1_t vm, vuint8m8_t vs2, size_t vl); +vuint16mf4_t __riscv_vcpop (vbool64_t vm, vuint16mf4_t vs2, size_t vl); +vuint16mf2_t __riscv_vcpop (vbool32_t vm, vuint16mf2_t vs2, size_t vl); +vuint16m1_t __riscv_vcpop (vbool16_t vm, vuint16m1_t vs2, size_t vl); +vuint16m2_t __riscv_vcpop (vbool8_t vm, vuint16m2_t vs2, size_t vl); +vuint16m4_t __riscv_vcpop (vbool4_t vm, vuint16m4_t vs2, size_t vl); +vuint16m8_t __riscv_vcpop (vbool2_t vm, vuint16m8_t vs2, size_t vl); +vuint32mf2_t __riscv_vcpop (vbool64_t vm, vuint32mf2_t vs2, size_t vl); +vuint32m1_t 
__riscv_vcpop (vbool32_t vm, vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vcpop (vbool16_t vm, vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vcpop (vbool8_t vm, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vcpop (vbool4_t vm, vuint32m8_t vs2, size_t vl); +vuint64m1_t __riscv_vcpop (vbool64_t vm, vuint64m1_t vs2, size_t vl); +vuint64m2_t __riscv_vcpop (vbool32_t vm, vuint64m2_t vs2, size_t vl); +vuint64m4_t __riscv_vcpop (vbool16_t vm, vuint64m4_t vs2, size_t vl); +vuint64m8_t __riscv_vcpop (vbool8_t vm, vuint64m8_t vs2, size_t vl); +---- + [[overloaded-]] ==== Vector Bit-manipulation used in Cryptography - Rotate diff --git a/auto-generated/vector-crypto/policy_funcs/api-testing/vclz.c b/auto-generated/vector-crypto/policy_funcs/api-testing/vclz.c new file mode 100644 index 000000000..6e3e1120f --- /dev/null +++ b/auto-generated/vector-crypto/policy_funcs/api-testing/vclz.c @@ -0,0 +1,354 @@ +#include +#include + +vuint8mf8_t test_vclz_v_u8mf8_tu(vuint8mf8_t vd, vuint8mf8_t vs2, size_t vl) { + return __riscv_vclz_v_u8mf8_tu(vd, vs2, vl); +} + +vuint8mf4_t test_vclz_v_u8mf4_tu(vuint8mf4_t vd, vuint8mf4_t vs2, size_t vl) { + return __riscv_vclz_v_u8mf4_tu(vd, vs2, vl); +} + +vuint8mf2_t test_vclz_v_u8mf2_tu(vuint8mf2_t vd, vuint8mf2_t vs2, size_t vl) { + return __riscv_vclz_v_u8mf2_tu(vd, vs2, vl); +} + +vuint8m1_t test_vclz_v_u8m1_tu(vuint8m1_t vd, vuint8m1_t vs2, size_t vl) { + return __riscv_vclz_v_u8m1_tu(vd, vs2, vl); +} + +vuint8m2_t test_vclz_v_u8m2_tu(vuint8m2_t vd, vuint8m2_t vs2, size_t vl) { + return __riscv_vclz_v_u8m2_tu(vd, vs2, vl); +} + +vuint8m4_t test_vclz_v_u8m4_tu(vuint8m4_t vd, vuint8m4_t vs2, size_t vl) { + return __riscv_vclz_v_u8m4_tu(vd, vs2, vl); +} + +vuint8m8_t test_vclz_v_u8m8_tu(vuint8m8_t vd, vuint8m8_t vs2, size_t vl) { + return __riscv_vclz_v_u8m8_tu(vd, vs2, vl); +} + +vuint16mf4_t test_vclz_v_u16mf4_tu(vuint16mf4_t vd, vuint16mf4_t vs2, size_t vl) { + return __riscv_vclz_v_u16mf4_tu(vd, vs2, vl); +} + +vuint16mf2_t test_vclz_v_u16mf2_tu(vuint16mf2_t vd, vuint16mf2_t vs2, size_t vl) { + return __riscv_vclz_v_u16mf2_tu(vd, vs2, vl); +} + +vuint16m1_t test_vclz_v_u16m1_tu(vuint16m1_t vd, vuint16m1_t vs2, size_t vl) { + return __riscv_vclz_v_u16m1_tu(vd, vs2, vl); +} + +vuint16m2_t test_vclz_v_u16m2_tu(vuint16m2_t vd, vuint16m2_t vs2, size_t vl) { + return __riscv_vclz_v_u16m2_tu(vd, vs2, vl); +} + +vuint16m4_t test_vclz_v_u16m4_tu(vuint16m4_t vd, vuint16m4_t vs2, size_t vl) { + return __riscv_vclz_v_u16m4_tu(vd, vs2, vl); +} + +vuint16m8_t test_vclz_v_u16m8_tu(vuint16m8_t vd, vuint16m8_t vs2, size_t vl) { + return __riscv_vclz_v_u16m8_tu(vd, vs2, vl); +} + +vuint32mf2_t test_vclz_v_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) { + return __riscv_vclz_v_u32mf2_tu(vd, vs2, vl); +} + +vuint32m1_t test_vclz_v_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vclz_v_u32m1_tu(vd, vs2, vl); +} + +vuint32m2_t test_vclz_v_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vclz_v_u32m2_tu(vd, vs2, vl); +} + +vuint32m4_t test_vclz_v_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vclz_v_u32m4_tu(vd, vs2, vl); +} + +vuint32m8_t test_vclz_v_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { + return __riscv_vclz_v_u32m8_tu(vd, vs2, vl); +} + +vuint64m1_t test_vclz_v_u64m1_tu(vuint64m1_t vd, vuint64m1_t vs2, size_t vl) { + return __riscv_vclz_v_u64m1_tu(vd, vs2, vl); +} + +vuint64m2_t test_vclz_v_u64m2_tu(vuint64m2_t vd, vuint64m2_t vs2, size_t vl) { + return 
__riscv_vclz_v_u64m2_tu(vd, vs2, vl); +} + +vuint64m4_t test_vclz_v_u64m4_tu(vuint64m4_t vd, vuint64m4_t vs2, size_t vl) { + return __riscv_vclz_v_u64m4_tu(vd, vs2, vl); +} + +vuint64m8_t test_vclz_v_u64m8_tu(vuint64m8_t vd, vuint64m8_t vs2, size_t vl) { + return __riscv_vclz_v_u64m8_tu(vd, vs2, vl); +} + +vuint8mf8_t test_vclz_v_u8mf8_tum(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, size_t vl) { + return __riscv_vclz_v_u8mf8_tum(vm, vd, vs2, vl); +} + +vuint8mf4_t test_vclz_v_u8mf4_tum(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, size_t vl) { + return __riscv_vclz_v_u8mf4_tum(vm, vd, vs2, vl); +} + +vuint8mf2_t test_vclz_v_u8mf2_tum(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, size_t vl) { + return __riscv_vclz_v_u8mf2_tum(vm, vd, vs2, vl); +} + +vuint8m1_t test_vclz_v_u8m1_tum(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, size_t vl) { + return __riscv_vclz_v_u8m1_tum(vm, vd, vs2, vl); +} + +vuint8m2_t test_vclz_v_u8m2_tum(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, size_t vl) { + return __riscv_vclz_v_u8m2_tum(vm, vd, vs2, vl); +} + +vuint8m4_t test_vclz_v_u8m4_tum(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, size_t vl) { + return __riscv_vclz_v_u8m4_tum(vm, vd, vs2, vl); +} + +vuint8m8_t test_vclz_v_u8m8_tum(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, size_t vl) { + return __riscv_vclz_v_u8m8_tum(vm, vd, vs2, vl); +} + +vuint16mf4_t test_vclz_v_u16mf4_tum(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, size_t vl) { + return __riscv_vclz_v_u16mf4_tum(vm, vd, vs2, vl); +} + +vuint16mf2_t test_vclz_v_u16mf2_tum(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, size_t vl) { + return __riscv_vclz_v_u16mf2_tum(vm, vd, vs2, vl); +} + +vuint16m1_t test_vclz_v_u16m1_tum(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, size_t vl) { + return __riscv_vclz_v_u16m1_tum(vm, vd, vs2, vl); +} + +vuint16m2_t test_vclz_v_u16m2_tum(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, size_t vl) { + return __riscv_vclz_v_u16m2_tum(vm, vd, vs2, vl); +} + +vuint16m4_t test_vclz_v_u16m4_tum(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, size_t vl) { + return __riscv_vclz_v_u16m4_tum(vm, vd, vs2, vl); +} + +vuint16m8_t test_vclz_v_u16m8_tum(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, size_t vl) { + return __riscv_vclz_v_u16m8_tum(vm, vd, vs2, vl); +} + +vuint32mf2_t test_vclz_v_u32mf2_tum(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) { + return __riscv_vclz_v_u32mf2_tum(vm, vd, vs2, vl); +} + +vuint32m1_t test_vclz_v_u32m1_tum(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vclz_v_u32m1_tum(vm, vd, vs2, vl); +} + +vuint32m2_t test_vclz_v_u32m2_tum(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vclz_v_u32m2_tum(vm, vd, vs2, vl); +} + +vuint32m4_t test_vclz_v_u32m4_tum(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vclz_v_u32m4_tum(vm, vd, vs2, vl); +} + +vuint32m8_t test_vclz_v_u32m8_tum(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { + return __riscv_vclz_v_u32m8_tum(vm, vd, vs2, vl); +} + +vuint64m1_t test_vclz_v_u64m1_tum(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, size_t vl) { + return __riscv_vclz_v_u64m1_tum(vm, vd, vs2, vl); +} + +vuint64m2_t test_vclz_v_u64m2_tum(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, size_t vl) { + return __riscv_vclz_v_u64m2_tum(vm, vd, vs2, vl); +} + +vuint64m4_t test_vclz_v_u64m4_tum(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, size_t vl) { + return __riscv_vclz_v_u64m4_tum(vm, vd, vs2, vl); +} + +vuint64m8_t test_vclz_v_u64m8_tum(vbool8_t vm, vuint64m8_t vd, 
vuint64m8_t vs2, size_t vl) { + return __riscv_vclz_v_u64m8_tum(vm, vd, vs2, vl); +} + +vuint8mf8_t test_vclz_v_u8mf8_tumu(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, size_t vl) { + return __riscv_vclz_v_u8mf8_tumu(vm, vd, vs2, vl); +} + +vuint8mf4_t test_vclz_v_u8mf4_tumu(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, size_t vl) { + return __riscv_vclz_v_u8mf4_tumu(vm, vd, vs2, vl); +} + +vuint8mf2_t test_vclz_v_u8mf2_tumu(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, size_t vl) { + return __riscv_vclz_v_u8mf2_tumu(vm, vd, vs2, vl); +} + +vuint8m1_t test_vclz_v_u8m1_tumu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, size_t vl) { + return __riscv_vclz_v_u8m1_tumu(vm, vd, vs2, vl); +} + +vuint8m2_t test_vclz_v_u8m2_tumu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, size_t vl) { + return __riscv_vclz_v_u8m2_tumu(vm, vd, vs2, vl); +} + +vuint8m4_t test_vclz_v_u8m4_tumu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, size_t vl) { + return __riscv_vclz_v_u8m4_tumu(vm, vd, vs2, vl); +} + +vuint8m8_t test_vclz_v_u8m8_tumu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, size_t vl) { + return __riscv_vclz_v_u8m8_tumu(vm, vd, vs2, vl); +} + +vuint16mf4_t test_vclz_v_u16mf4_tumu(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, size_t vl) { + return __riscv_vclz_v_u16mf4_tumu(vm, vd, vs2, vl); +} + +vuint16mf2_t test_vclz_v_u16mf2_tumu(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, size_t vl) { + return __riscv_vclz_v_u16mf2_tumu(vm, vd, vs2, vl); +} + +vuint16m1_t test_vclz_v_u16m1_tumu(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, size_t vl) { + return __riscv_vclz_v_u16m1_tumu(vm, vd, vs2, vl); +} + +vuint16m2_t test_vclz_v_u16m2_tumu(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, size_t vl) { + return __riscv_vclz_v_u16m2_tumu(vm, vd, vs2, vl); +} + +vuint16m4_t test_vclz_v_u16m4_tumu(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, size_t vl) { + return __riscv_vclz_v_u16m4_tumu(vm, vd, vs2, vl); +} + +vuint16m8_t test_vclz_v_u16m8_tumu(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, size_t vl) { + return __riscv_vclz_v_u16m8_tumu(vm, vd, vs2, vl); +} + +vuint32mf2_t test_vclz_v_u32mf2_tumu(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) { + return __riscv_vclz_v_u32mf2_tumu(vm, vd, vs2, vl); +} + +vuint32m1_t test_vclz_v_u32m1_tumu(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vclz_v_u32m1_tumu(vm, vd, vs2, vl); +} + +vuint32m2_t test_vclz_v_u32m2_tumu(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vclz_v_u32m2_tumu(vm, vd, vs2, vl); +} + +vuint32m4_t test_vclz_v_u32m4_tumu(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vclz_v_u32m4_tumu(vm, vd, vs2, vl); +} + +vuint32m8_t test_vclz_v_u32m8_tumu(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { + return __riscv_vclz_v_u32m8_tumu(vm, vd, vs2, vl); +} + +vuint64m1_t test_vclz_v_u64m1_tumu(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, size_t vl) { + return __riscv_vclz_v_u64m1_tumu(vm, vd, vs2, vl); +} + +vuint64m2_t test_vclz_v_u64m2_tumu(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, size_t vl) { + return __riscv_vclz_v_u64m2_tumu(vm, vd, vs2, vl); +} + +vuint64m4_t test_vclz_v_u64m4_tumu(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, size_t vl) { + return __riscv_vclz_v_u64m4_tumu(vm, vd, vs2, vl); +} + +vuint64m8_t test_vclz_v_u64m8_tumu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, size_t vl) { + return __riscv_vclz_v_u64m8_tumu(vm, vd, vs2, vl); +} + +vuint8mf8_t test_vclz_v_u8mf8_mu(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, size_t vl) { 
+ return __riscv_vclz_v_u8mf8_mu(vm, vd, vs2, vl); +} + +vuint8mf4_t test_vclz_v_u8mf4_mu(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, size_t vl) { + return __riscv_vclz_v_u8mf4_mu(vm, vd, vs2, vl); +} + +vuint8mf2_t test_vclz_v_u8mf2_mu(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, size_t vl) { + return __riscv_vclz_v_u8mf2_mu(vm, vd, vs2, vl); +} + +vuint8m1_t test_vclz_v_u8m1_mu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, size_t vl) { + return __riscv_vclz_v_u8m1_mu(vm, vd, vs2, vl); +} + +vuint8m2_t test_vclz_v_u8m2_mu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, size_t vl) { + return __riscv_vclz_v_u8m2_mu(vm, vd, vs2, vl); +} + +vuint8m4_t test_vclz_v_u8m4_mu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, size_t vl) { + return __riscv_vclz_v_u8m4_mu(vm, vd, vs2, vl); +} + +vuint8m8_t test_vclz_v_u8m8_mu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, size_t vl) { + return __riscv_vclz_v_u8m8_mu(vm, vd, vs2, vl); +} + +vuint16mf4_t test_vclz_v_u16mf4_mu(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, size_t vl) { + return __riscv_vclz_v_u16mf4_mu(vm, vd, vs2, vl); +} + +vuint16mf2_t test_vclz_v_u16mf2_mu(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, size_t vl) { + return __riscv_vclz_v_u16mf2_mu(vm, vd, vs2, vl); +} + +vuint16m1_t test_vclz_v_u16m1_mu(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, size_t vl) { + return __riscv_vclz_v_u16m1_mu(vm, vd, vs2, vl); +} + +vuint16m2_t test_vclz_v_u16m2_mu(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, size_t vl) { + return __riscv_vclz_v_u16m2_mu(vm, vd, vs2, vl); +} + +vuint16m4_t test_vclz_v_u16m4_mu(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, size_t vl) { + return __riscv_vclz_v_u16m4_mu(vm, vd, vs2, vl); +} + +vuint16m8_t test_vclz_v_u16m8_mu(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, size_t vl) { + return __riscv_vclz_v_u16m8_mu(vm, vd, vs2, vl); +} + +vuint32mf2_t test_vclz_v_u32mf2_mu(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) { + return __riscv_vclz_v_u32mf2_mu(vm, vd, vs2, vl); +} + +vuint32m1_t test_vclz_v_u32m1_mu(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vclz_v_u32m1_mu(vm, vd, vs2, vl); +} + +vuint32m2_t test_vclz_v_u32m2_mu(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vclz_v_u32m2_mu(vm, vd, vs2, vl); +} + +vuint32m4_t test_vclz_v_u32m4_mu(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vclz_v_u32m4_mu(vm, vd, vs2, vl); +} + +vuint32m8_t test_vclz_v_u32m8_mu(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { + return __riscv_vclz_v_u32m8_mu(vm, vd, vs2, vl); +} + +vuint64m1_t test_vclz_v_u64m1_mu(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, size_t vl) { + return __riscv_vclz_v_u64m1_mu(vm, vd, vs2, vl); +} + +vuint64m2_t test_vclz_v_u64m2_mu(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, size_t vl) { + return __riscv_vclz_v_u64m2_mu(vm, vd, vs2, vl); +} + +vuint64m4_t test_vclz_v_u64m4_mu(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, size_t vl) { + return __riscv_vclz_v_u64m4_mu(vm, vd, vs2, vl); +} + +vuint64m8_t test_vclz_v_u64m8_mu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, size_t vl) { + return __riscv_vclz_v_u64m8_mu(vm, vd, vs2, vl); +} diff --git a/auto-generated/vector-crypto/policy_funcs/api-testing/vcpop.c b/auto-generated/vector-crypto/policy_funcs/api-testing/vcpop.c new file mode 100644 index 000000000..7dbb9b78c --- /dev/null +++ b/auto-generated/vector-crypto/policy_funcs/api-testing/vcpop.c @@ -0,0 +1,354 @@ +#include +#include + +vuint8mf8_t test_vcpop_v_u8mf8_tu(vuint8mf8_t vd, 
vuint8mf8_t vs2, size_t vl) { + return __riscv_vcpop_v_u8mf8_tu(vd, vs2, vl); +} + +vuint8mf4_t test_vcpop_v_u8mf4_tu(vuint8mf4_t vd, vuint8mf4_t vs2, size_t vl) { + return __riscv_vcpop_v_u8mf4_tu(vd, vs2, vl); +} + +vuint8mf2_t test_vcpop_v_u8mf2_tu(vuint8mf2_t vd, vuint8mf2_t vs2, size_t vl) { + return __riscv_vcpop_v_u8mf2_tu(vd, vs2, vl); +} + +vuint8m1_t test_vcpop_v_u8m1_tu(vuint8m1_t vd, vuint8m1_t vs2, size_t vl) { + return __riscv_vcpop_v_u8m1_tu(vd, vs2, vl); +} + +vuint8m2_t test_vcpop_v_u8m2_tu(vuint8m2_t vd, vuint8m2_t vs2, size_t vl) { + return __riscv_vcpop_v_u8m2_tu(vd, vs2, vl); +} + +vuint8m4_t test_vcpop_v_u8m4_tu(vuint8m4_t vd, vuint8m4_t vs2, size_t vl) { + return __riscv_vcpop_v_u8m4_tu(vd, vs2, vl); +} + +vuint8m8_t test_vcpop_v_u8m8_tu(vuint8m8_t vd, vuint8m8_t vs2, size_t vl) { + return __riscv_vcpop_v_u8m8_tu(vd, vs2, vl); +} + +vuint16mf4_t test_vcpop_v_u16mf4_tu(vuint16mf4_t vd, vuint16mf4_t vs2, size_t vl) { + return __riscv_vcpop_v_u16mf4_tu(vd, vs2, vl); +} + +vuint16mf2_t test_vcpop_v_u16mf2_tu(vuint16mf2_t vd, vuint16mf2_t vs2, size_t vl) { + return __riscv_vcpop_v_u16mf2_tu(vd, vs2, vl); +} + +vuint16m1_t test_vcpop_v_u16m1_tu(vuint16m1_t vd, vuint16m1_t vs2, size_t vl) { + return __riscv_vcpop_v_u16m1_tu(vd, vs2, vl); +} + +vuint16m2_t test_vcpop_v_u16m2_tu(vuint16m2_t vd, vuint16m2_t vs2, size_t vl) { + return __riscv_vcpop_v_u16m2_tu(vd, vs2, vl); +} + +vuint16m4_t test_vcpop_v_u16m4_tu(vuint16m4_t vd, vuint16m4_t vs2, size_t vl) { + return __riscv_vcpop_v_u16m4_tu(vd, vs2, vl); +} + +vuint16m8_t test_vcpop_v_u16m8_tu(vuint16m8_t vd, vuint16m8_t vs2, size_t vl) { + return __riscv_vcpop_v_u16m8_tu(vd, vs2, vl); +} + +vuint32mf2_t test_vcpop_v_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) { + return __riscv_vcpop_v_u32mf2_tu(vd, vs2, vl); +} + +vuint32m1_t test_vcpop_v_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vcpop_v_u32m1_tu(vd, vs2, vl); +} + +vuint32m2_t test_vcpop_v_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vcpop_v_u32m2_tu(vd, vs2, vl); +} + +vuint32m4_t test_vcpop_v_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vcpop_v_u32m4_tu(vd, vs2, vl); +} + +vuint32m8_t test_vcpop_v_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { + return __riscv_vcpop_v_u32m8_tu(vd, vs2, vl); +} + +vuint64m1_t test_vcpop_v_u64m1_tu(vuint64m1_t vd, vuint64m1_t vs2, size_t vl) { + return __riscv_vcpop_v_u64m1_tu(vd, vs2, vl); +} + +vuint64m2_t test_vcpop_v_u64m2_tu(vuint64m2_t vd, vuint64m2_t vs2, size_t vl) { + return __riscv_vcpop_v_u64m2_tu(vd, vs2, vl); +} + +vuint64m4_t test_vcpop_v_u64m4_tu(vuint64m4_t vd, vuint64m4_t vs2, size_t vl) { + return __riscv_vcpop_v_u64m4_tu(vd, vs2, vl); +} + +vuint64m8_t test_vcpop_v_u64m8_tu(vuint64m8_t vd, vuint64m8_t vs2, size_t vl) { + return __riscv_vcpop_v_u64m8_tu(vd, vs2, vl); +} + +vuint8mf8_t test_vcpop_v_u8mf8_tum(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, size_t vl) { + return __riscv_vcpop_v_u8mf8_tum(vm, vd, vs2, vl); +} + +vuint8mf4_t test_vcpop_v_u8mf4_tum(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, size_t vl) { + return __riscv_vcpop_v_u8mf4_tum(vm, vd, vs2, vl); +} + +vuint8mf2_t test_vcpop_v_u8mf2_tum(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, size_t vl) { + return __riscv_vcpop_v_u8mf2_tum(vm, vd, vs2, vl); +} + +vuint8m1_t test_vcpop_v_u8m1_tum(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, size_t vl) { + return __riscv_vcpop_v_u8m1_tum(vm, vd, vs2, vl); +} + +vuint8m2_t test_vcpop_v_u8m2_tum(vbool4_t vm, 
vuint8m2_t vd, vuint8m2_t vs2, size_t vl) { + return __riscv_vcpop_v_u8m2_tum(vm, vd, vs2, vl); +} + +vuint8m4_t test_vcpop_v_u8m4_tum(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, size_t vl) { + return __riscv_vcpop_v_u8m4_tum(vm, vd, vs2, vl); +} + +vuint8m8_t test_vcpop_v_u8m8_tum(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, size_t vl) { + return __riscv_vcpop_v_u8m8_tum(vm, vd, vs2, vl); +} + +vuint16mf4_t test_vcpop_v_u16mf4_tum(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, size_t vl) { + return __riscv_vcpop_v_u16mf4_tum(vm, vd, vs2, vl); +} + +vuint16mf2_t test_vcpop_v_u16mf2_tum(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, size_t vl) { + return __riscv_vcpop_v_u16mf2_tum(vm, vd, vs2, vl); +} + +vuint16m1_t test_vcpop_v_u16m1_tum(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, size_t vl) { + return __riscv_vcpop_v_u16m1_tum(vm, vd, vs2, vl); +} + +vuint16m2_t test_vcpop_v_u16m2_tum(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, size_t vl) { + return __riscv_vcpop_v_u16m2_tum(vm, vd, vs2, vl); +} + +vuint16m4_t test_vcpop_v_u16m4_tum(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, size_t vl) { + return __riscv_vcpop_v_u16m4_tum(vm, vd, vs2, vl); +} + +vuint16m8_t test_vcpop_v_u16m8_tum(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, size_t vl) { + return __riscv_vcpop_v_u16m8_tum(vm, vd, vs2, vl); +} + +vuint32mf2_t test_vcpop_v_u32mf2_tum(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) { + return __riscv_vcpop_v_u32mf2_tum(vm, vd, vs2, vl); +} + +vuint32m1_t test_vcpop_v_u32m1_tum(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vcpop_v_u32m1_tum(vm, vd, vs2, vl); +} + +vuint32m2_t test_vcpop_v_u32m2_tum(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vcpop_v_u32m2_tum(vm, vd, vs2, vl); +} + +vuint32m4_t test_vcpop_v_u32m4_tum(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vcpop_v_u32m4_tum(vm, vd, vs2, vl); +} + +vuint32m8_t test_vcpop_v_u32m8_tum(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { + return __riscv_vcpop_v_u32m8_tum(vm, vd, vs2, vl); +} + +vuint64m1_t test_vcpop_v_u64m1_tum(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, size_t vl) { + return __riscv_vcpop_v_u64m1_tum(vm, vd, vs2, vl); +} + +vuint64m2_t test_vcpop_v_u64m2_tum(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, size_t vl) { + return __riscv_vcpop_v_u64m2_tum(vm, vd, vs2, vl); +} + +vuint64m4_t test_vcpop_v_u64m4_tum(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, size_t vl) { + return __riscv_vcpop_v_u64m4_tum(vm, vd, vs2, vl); +} + +vuint64m8_t test_vcpop_v_u64m8_tum(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, size_t vl) { + return __riscv_vcpop_v_u64m8_tum(vm, vd, vs2, vl); +} + +vuint8mf8_t test_vcpop_v_u8mf8_tumu(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, size_t vl) { + return __riscv_vcpop_v_u8mf8_tumu(vm, vd, vs2, vl); +} + +vuint8mf4_t test_vcpop_v_u8mf4_tumu(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, size_t vl) { + return __riscv_vcpop_v_u8mf4_tumu(vm, vd, vs2, vl); +} + +vuint8mf2_t test_vcpop_v_u8mf2_tumu(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, size_t vl) { + return __riscv_vcpop_v_u8mf2_tumu(vm, vd, vs2, vl); +} + +vuint8m1_t test_vcpop_v_u8m1_tumu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, size_t vl) { + return __riscv_vcpop_v_u8m1_tumu(vm, vd, vs2, vl); +} + +vuint8m2_t test_vcpop_v_u8m2_tumu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, size_t vl) { + return __riscv_vcpop_v_u8m2_tumu(vm, vd, vs2, vl); +} + +vuint8m4_t test_vcpop_v_u8m4_tumu(vbool2_t vm, vuint8m4_t vd, 
vuint8m4_t vs2, size_t vl) { + return __riscv_vcpop_v_u8m4_tumu(vm, vd, vs2, vl); +} + +vuint8m8_t test_vcpop_v_u8m8_tumu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, size_t vl) { + return __riscv_vcpop_v_u8m8_tumu(vm, vd, vs2, vl); +} + +vuint16mf4_t test_vcpop_v_u16mf4_tumu(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, size_t vl) { + return __riscv_vcpop_v_u16mf4_tumu(vm, vd, vs2, vl); +} + +vuint16mf2_t test_vcpop_v_u16mf2_tumu(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, size_t vl) { + return __riscv_vcpop_v_u16mf2_tumu(vm, vd, vs2, vl); +} + +vuint16m1_t test_vcpop_v_u16m1_tumu(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, size_t vl) { + return __riscv_vcpop_v_u16m1_tumu(vm, vd, vs2, vl); +} + +vuint16m2_t test_vcpop_v_u16m2_tumu(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, size_t vl) { + return __riscv_vcpop_v_u16m2_tumu(vm, vd, vs2, vl); +} + +vuint16m4_t test_vcpop_v_u16m4_tumu(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, size_t vl) { + return __riscv_vcpop_v_u16m4_tumu(vm, vd, vs2, vl); +} + +vuint16m8_t test_vcpop_v_u16m8_tumu(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, size_t vl) { + return __riscv_vcpop_v_u16m8_tumu(vm, vd, vs2, vl); +} + +vuint32mf2_t test_vcpop_v_u32mf2_tumu(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) { + return __riscv_vcpop_v_u32mf2_tumu(vm, vd, vs2, vl); +} + +vuint32m1_t test_vcpop_v_u32m1_tumu(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vcpop_v_u32m1_tumu(vm, vd, vs2, vl); +} + +vuint32m2_t test_vcpop_v_u32m2_tumu(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vcpop_v_u32m2_tumu(vm, vd, vs2, vl); +} + +vuint32m4_t test_vcpop_v_u32m4_tumu(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vcpop_v_u32m4_tumu(vm, vd, vs2, vl); +} + +vuint32m8_t test_vcpop_v_u32m8_tumu(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { + return __riscv_vcpop_v_u32m8_tumu(vm, vd, vs2, vl); +} + +vuint64m1_t test_vcpop_v_u64m1_tumu(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, size_t vl) { + return __riscv_vcpop_v_u64m1_tumu(vm, vd, vs2, vl); +} + +vuint64m2_t test_vcpop_v_u64m2_tumu(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, size_t vl) { + return __riscv_vcpop_v_u64m2_tumu(vm, vd, vs2, vl); +} + +vuint64m4_t test_vcpop_v_u64m4_tumu(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, size_t vl) { + return __riscv_vcpop_v_u64m4_tumu(vm, vd, vs2, vl); +} + +vuint64m8_t test_vcpop_v_u64m8_tumu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, size_t vl) { + return __riscv_vcpop_v_u64m8_tumu(vm, vd, vs2, vl); +} + +vuint8mf8_t test_vcpop_v_u8mf8_mu(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, size_t vl) { + return __riscv_vcpop_v_u8mf8_mu(vm, vd, vs2, vl); +} + +vuint8mf4_t test_vcpop_v_u8mf4_mu(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, size_t vl) { + return __riscv_vcpop_v_u8mf4_mu(vm, vd, vs2, vl); +} + +vuint8mf2_t test_vcpop_v_u8mf2_mu(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, size_t vl) { + return __riscv_vcpop_v_u8mf2_mu(vm, vd, vs2, vl); +} + +vuint8m1_t test_vcpop_v_u8m1_mu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, size_t vl) { + return __riscv_vcpop_v_u8m1_mu(vm, vd, vs2, vl); +} + +vuint8m2_t test_vcpop_v_u8m2_mu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, size_t vl) { + return __riscv_vcpop_v_u8m2_mu(vm, vd, vs2, vl); +} + +vuint8m4_t test_vcpop_v_u8m4_mu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, size_t vl) { + return __riscv_vcpop_v_u8m4_mu(vm, vd, vs2, vl); +} + +vuint8m8_t test_vcpop_v_u8m8_mu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t 
vs2, size_t vl) {
+  return __riscv_vcpop_v_u8m8_mu(vm, vd, vs2, vl);
+}
+
+vuint16mf4_t test_vcpop_v_u16mf4_mu(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, size_t vl) {
+  return __riscv_vcpop_v_u16mf4_mu(vm, vd, vs2, vl);
+}
+
+vuint16mf2_t test_vcpop_v_u16mf2_mu(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, size_t vl) {
+  return __riscv_vcpop_v_u16mf2_mu(vm, vd, vs2, vl);
+}
+
+vuint16m1_t test_vcpop_v_u16m1_mu(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, size_t vl) {
+  return __riscv_vcpop_v_u16m1_mu(vm, vd, vs2, vl);
+}
+
+vuint16m2_t test_vcpop_v_u16m2_mu(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, size_t vl) {
+  return __riscv_vcpop_v_u16m2_mu(vm, vd, vs2, vl);
+}
+
+vuint16m4_t test_vcpop_v_u16m4_mu(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, size_t vl) {
+  return __riscv_vcpop_v_u16m4_mu(vm, vd, vs2, vl);
+}
+
+vuint16m8_t test_vcpop_v_u16m8_mu(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, size_t vl) {
+  return __riscv_vcpop_v_u16m8_mu(vm, vd, vs2, vl);
+}
+
+vuint32mf2_t test_vcpop_v_u32mf2_mu(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) {
+  return __riscv_vcpop_v_u32mf2_mu(vm, vd, vs2, vl);
+}
+
+vuint32m1_t test_vcpop_v_u32m1_mu(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, size_t vl) {
+  return __riscv_vcpop_v_u32m1_mu(vm, vd, vs2, vl);
+}
+
+vuint32m2_t test_vcpop_v_u32m2_mu(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, size_t vl) {
+  return __riscv_vcpop_v_u32m2_mu(vm, vd, vs2, vl);
+}
+
+vuint32m4_t test_vcpop_v_u32m4_mu(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, size_t vl) {
+  return __riscv_vcpop_v_u32m4_mu(vm, vd, vs2, vl);
+}
+
+vuint32m8_t test_vcpop_v_u32m8_mu(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, size_t vl) {
+  return __riscv_vcpop_v_u32m8_mu(vm, vd, vs2, vl);
+}
+
+vuint64m1_t test_vcpop_v_u64m1_mu(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, size_t vl) {
+  return __riscv_vcpop_v_u64m1_mu(vm, vd, vs2, vl);
+}
+
+vuint64m2_t test_vcpop_v_u64m2_mu(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, size_t vl) {
+  return __riscv_vcpop_v_u64m2_mu(vm, vd, vs2, vl);
+}
+
+vuint64m4_t test_vcpop_v_u64m4_mu(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, size_t vl) {
+  return __riscv_vcpop_v_u64m4_mu(vm, vd, vs2, vl);
+}
+
+vuint64m8_t test_vcpop_v_u64m8_mu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, size_t vl) {
+  return __riscv_vcpop_v_u64m8_mu(vm, vd, vs2, vl);
+}
diff --git a/auto-generated/vector-crypto/policy_funcs/api-testing/vctz.c b/auto-generated/vector-crypto/policy_funcs/api-testing/vctz.c
new file mode 100644
index 000000000..b191067e8
--- /dev/null
+++ b/auto-generated/vector-crypto/policy_funcs/api-testing/vctz.c
@@ -0,0 +1,354 @@
+#include <riscv_vector.h>
+#include <stdint.h>
+
+vuint8mf8_t test_vctz_v_u8mf8_tu(vuint8mf8_t vd, vuint8mf8_t vs2, size_t vl) {
+  return __riscv_vctz_v_u8mf8_tu(vd, vs2, vl);
+}
+
+vuint8mf4_t test_vctz_v_u8mf4_tu(vuint8mf4_t vd, vuint8mf4_t vs2, size_t vl) {
+  return __riscv_vctz_v_u8mf4_tu(vd, vs2, vl);
+}
+
+vuint8mf2_t test_vctz_v_u8mf2_tu(vuint8mf2_t vd, vuint8mf2_t vs2, size_t vl) {
+  return __riscv_vctz_v_u8mf2_tu(vd, vs2, vl);
+}
+
+vuint8m1_t test_vctz_v_u8m1_tu(vuint8m1_t vd, vuint8m1_t vs2, size_t vl) {
+  return __riscv_vctz_v_u8m1_tu(vd, vs2, vl);
+}
+
+vuint8m2_t test_vctz_v_u8m2_tu(vuint8m2_t vd, vuint8m2_t vs2, size_t vl) {
+  return __riscv_vctz_v_u8m2_tu(vd, vs2, vl);
+}
+
+vuint8m4_t test_vctz_v_u8m4_tu(vuint8m4_t vd, vuint8m4_t vs2, size_t vl) {
+  return __riscv_vctz_v_u8m4_tu(vd, vs2, vl);
+}
+
+vuint8m8_t test_vctz_v_u8m8_tu(vuint8m8_t vd, vuint8m8_t vs2, size_t vl) {
+  return
__riscv_vctz_v_u8m8_tu(vd, vs2, vl); +} + +vuint16mf4_t test_vctz_v_u16mf4_tu(vuint16mf4_t vd, vuint16mf4_t vs2, size_t vl) { + return __riscv_vctz_v_u16mf4_tu(vd, vs2, vl); +} + +vuint16mf2_t test_vctz_v_u16mf2_tu(vuint16mf2_t vd, vuint16mf2_t vs2, size_t vl) { + return __riscv_vctz_v_u16mf2_tu(vd, vs2, vl); +} + +vuint16m1_t test_vctz_v_u16m1_tu(vuint16m1_t vd, vuint16m1_t vs2, size_t vl) { + return __riscv_vctz_v_u16m1_tu(vd, vs2, vl); +} + +vuint16m2_t test_vctz_v_u16m2_tu(vuint16m2_t vd, vuint16m2_t vs2, size_t vl) { + return __riscv_vctz_v_u16m2_tu(vd, vs2, vl); +} + +vuint16m4_t test_vctz_v_u16m4_tu(vuint16m4_t vd, vuint16m4_t vs2, size_t vl) { + return __riscv_vctz_v_u16m4_tu(vd, vs2, vl); +} + +vuint16m8_t test_vctz_v_u16m8_tu(vuint16m8_t vd, vuint16m8_t vs2, size_t vl) { + return __riscv_vctz_v_u16m8_tu(vd, vs2, vl); +} + +vuint32mf2_t test_vctz_v_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) { + return __riscv_vctz_v_u32mf2_tu(vd, vs2, vl); +} + +vuint32m1_t test_vctz_v_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vctz_v_u32m1_tu(vd, vs2, vl); +} + +vuint32m2_t test_vctz_v_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vctz_v_u32m2_tu(vd, vs2, vl); +} + +vuint32m4_t test_vctz_v_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vctz_v_u32m4_tu(vd, vs2, vl); +} + +vuint32m8_t test_vctz_v_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { + return __riscv_vctz_v_u32m8_tu(vd, vs2, vl); +} + +vuint64m1_t test_vctz_v_u64m1_tu(vuint64m1_t vd, vuint64m1_t vs2, size_t vl) { + return __riscv_vctz_v_u64m1_tu(vd, vs2, vl); +} + +vuint64m2_t test_vctz_v_u64m2_tu(vuint64m2_t vd, vuint64m2_t vs2, size_t vl) { + return __riscv_vctz_v_u64m2_tu(vd, vs2, vl); +} + +vuint64m4_t test_vctz_v_u64m4_tu(vuint64m4_t vd, vuint64m4_t vs2, size_t vl) { + return __riscv_vctz_v_u64m4_tu(vd, vs2, vl); +} + +vuint64m8_t test_vctz_v_u64m8_tu(vuint64m8_t vd, vuint64m8_t vs2, size_t vl) { + return __riscv_vctz_v_u64m8_tu(vd, vs2, vl); +} + +vuint8mf8_t test_vctz_v_u8mf8_tum(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, size_t vl) { + return __riscv_vctz_v_u8mf8_tum(vm, vd, vs2, vl); +} + +vuint8mf4_t test_vctz_v_u8mf4_tum(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, size_t vl) { + return __riscv_vctz_v_u8mf4_tum(vm, vd, vs2, vl); +} + +vuint8mf2_t test_vctz_v_u8mf2_tum(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, size_t vl) { + return __riscv_vctz_v_u8mf2_tum(vm, vd, vs2, vl); +} + +vuint8m1_t test_vctz_v_u8m1_tum(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, size_t vl) { + return __riscv_vctz_v_u8m1_tum(vm, vd, vs2, vl); +} + +vuint8m2_t test_vctz_v_u8m2_tum(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, size_t vl) { + return __riscv_vctz_v_u8m2_tum(vm, vd, vs2, vl); +} + +vuint8m4_t test_vctz_v_u8m4_tum(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, size_t vl) { + return __riscv_vctz_v_u8m4_tum(vm, vd, vs2, vl); +} + +vuint8m8_t test_vctz_v_u8m8_tum(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, size_t vl) { + return __riscv_vctz_v_u8m8_tum(vm, vd, vs2, vl); +} + +vuint16mf4_t test_vctz_v_u16mf4_tum(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, size_t vl) { + return __riscv_vctz_v_u16mf4_tum(vm, vd, vs2, vl); +} + +vuint16mf2_t test_vctz_v_u16mf2_tum(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, size_t vl) { + return __riscv_vctz_v_u16mf2_tum(vm, vd, vs2, vl); +} + +vuint16m1_t test_vctz_v_u16m1_tum(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, size_t vl) { + return __riscv_vctz_v_u16m1_tum(vm, vd, vs2, vl); +} + 
+vuint16m2_t test_vctz_v_u16m2_tum(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, size_t vl) { + return __riscv_vctz_v_u16m2_tum(vm, vd, vs2, vl); +} + +vuint16m4_t test_vctz_v_u16m4_tum(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, size_t vl) { + return __riscv_vctz_v_u16m4_tum(vm, vd, vs2, vl); +} + +vuint16m8_t test_vctz_v_u16m8_tum(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, size_t vl) { + return __riscv_vctz_v_u16m8_tum(vm, vd, vs2, vl); +} + +vuint32mf2_t test_vctz_v_u32mf2_tum(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) { + return __riscv_vctz_v_u32mf2_tum(vm, vd, vs2, vl); +} + +vuint32m1_t test_vctz_v_u32m1_tum(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vctz_v_u32m1_tum(vm, vd, vs2, vl); +} + +vuint32m2_t test_vctz_v_u32m2_tum(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vctz_v_u32m2_tum(vm, vd, vs2, vl); +} + +vuint32m4_t test_vctz_v_u32m4_tum(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vctz_v_u32m4_tum(vm, vd, vs2, vl); +} + +vuint32m8_t test_vctz_v_u32m8_tum(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { + return __riscv_vctz_v_u32m8_tum(vm, vd, vs2, vl); +} + +vuint64m1_t test_vctz_v_u64m1_tum(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, size_t vl) { + return __riscv_vctz_v_u64m1_tum(vm, vd, vs2, vl); +} + +vuint64m2_t test_vctz_v_u64m2_tum(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, size_t vl) { + return __riscv_vctz_v_u64m2_tum(vm, vd, vs2, vl); +} + +vuint64m4_t test_vctz_v_u64m4_tum(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, size_t vl) { + return __riscv_vctz_v_u64m4_tum(vm, vd, vs2, vl); +} + +vuint64m8_t test_vctz_v_u64m8_tum(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, size_t vl) { + return __riscv_vctz_v_u64m8_tum(vm, vd, vs2, vl); +} + +vuint8mf8_t test_vctz_v_u8mf8_tumu(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, size_t vl) { + return __riscv_vctz_v_u8mf8_tumu(vm, vd, vs2, vl); +} + +vuint8mf4_t test_vctz_v_u8mf4_tumu(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, size_t vl) { + return __riscv_vctz_v_u8mf4_tumu(vm, vd, vs2, vl); +} + +vuint8mf2_t test_vctz_v_u8mf2_tumu(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, size_t vl) { + return __riscv_vctz_v_u8mf2_tumu(vm, vd, vs2, vl); +} + +vuint8m1_t test_vctz_v_u8m1_tumu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, size_t vl) { + return __riscv_vctz_v_u8m1_tumu(vm, vd, vs2, vl); +} + +vuint8m2_t test_vctz_v_u8m2_tumu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, size_t vl) { + return __riscv_vctz_v_u8m2_tumu(vm, vd, vs2, vl); +} + +vuint8m4_t test_vctz_v_u8m4_tumu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, size_t vl) { + return __riscv_vctz_v_u8m4_tumu(vm, vd, vs2, vl); +} + +vuint8m8_t test_vctz_v_u8m8_tumu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, size_t vl) { + return __riscv_vctz_v_u8m8_tumu(vm, vd, vs2, vl); +} + +vuint16mf4_t test_vctz_v_u16mf4_tumu(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, size_t vl) { + return __riscv_vctz_v_u16mf4_tumu(vm, vd, vs2, vl); +} + +vuint16mf2_t test_vctz_v_u16mf2_tumu(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, size_t vl) { + return __riscv_vctz_v_u16mf2_tumu(vm, vd, vs2, vl); +} + +vuint16m1_t test_vctz_v_u16m1_tumu(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, size_t vl) { + return __riscv_vctz_v_u16m1_tumu(vm, vd, vs2, vl); +} + +vuint16m2_t test_vctz_v_u16m2_tumu(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, size_t vl) { + return __riscv_vctz_v_u16m2_tumu(vm, vd, vs2, vl); +} + +vuint16m4_t test_vctz_v_u16m4_tumu(vbool4_t vm, 
vuint16m4_t vd, vuint16m4_t vs2, size_t vl) { + return __riscv_vctz_v_u16m4_tumu(vm, vd, vs2, vl); +} + +vuint16m8_t test_vctz_v_u16m8_tumu(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, size_t vl) { + return __riscv_vctz_v_u16m8_tumu(vm, vd, vs2, vl); +} + +vuint32mf2_t test_vctz_v_u32mf2_tumu(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) { + return __riscv_vctz_v_u32mf2_tumu(vm, vd, vs2, vl); +} + +vuint32m1_t test_vctz_v_u32m1_tumu(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vctz_v_u32m1_tumu(vm, vd, vs2, vl); +} + +vuint32m2_t test_vctz_v_u32m2_tumu(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vctz_v_u32m2_tumu(vm, vd, vs2, vl); +} + +vuint32m4_t test_vctz_v_u32m4_tumu(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vctz_v_u32m4_tumu(vm, vd, vs2, vl); +} + +vuint32m8_t test_vctz_v_u32m8_tumu(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { + return __riscv_vctz_v_u32m8_tumu(vm, vd, vs2, vl); +} + +vuint64m1_t test_vctz_v_u64m1_tumu(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, size_t vl) { + return __riscv_vctz_v_u64m1_tumu(vm, vd, vs2, vl); +} + +vuint64m2_t test_vctz_v_u64m2_tumu(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, size_t vl) { + return __riscv_vctz_v_u64m2_tumu(vm, vd, vs2, vl); +} + +vuint64m4_t test_vctz_v_u64m4_tumu(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, size_t vl) { + return __riscv_vctz_v_u64m4_tumu(vm, vd, vs2, vl); +} + +vuint64m8_t test_vctz_v_u64m8_tumu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, size_t vl) { + return __riscv_vctz_v_u64m8_tumu(vm, vd, vs2, vl); +} + +vuint8mf8_t test_vctz_v_u8mf8_mu(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, size_t vl) { + return __riscv_vctz_v_u8mf8_mu(vm, vd, vs2, vl); +} + +vuint8mf4_t test_vctz_v_u8mf4_mu(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, size_t vl) { + return __riscv_vctz_v_u8mf4_mu(vm, vd, vs2, vl); +} + +vuint8mf2_t test_vctz_v_u8mf2_mu(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, size_t vl) { + return __riscv_vctz_v_u8mf2_mu(vm, vd, vs2, vl); +} + +vuint8m1_t test_vctz_v_u8m1_mu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, size_t vl) { + return __riscv_vctz_v_u8m1_mu(vm, vd, vs2, vl); +} + +vuint8m2_t test_vctz_v_u8m2_mu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, size_t vl) { + return __riscv_vctz_v_u8m2_mu(vm, vd, vs2, vl); +} + +vuint8m4_t test_vctz_v_u8m4_mu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, size_t vl) { + return __riscv_vctz_v_u8m4_mu(vm, vd, vs2, vl); +} + +vuint8m8_t test_vctz_v_u8m8_mu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, size_t vl) { + return __riscv_vctz_v_u8m8_mu(vm, vd, vs2, vl); +} + +vuint16mf4_t test_vctz_v_u16mf4_mu(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, size_t vl) { + return __riscv_vctz_v_u16mf4_mu(vm, vd, vs2, vl); +} + +vuint16mf2_t test_vctz_v_u16mf2_mu(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, size_t vl) { + return __riscv_vctz_v_u16mf2_mu(vm, vd, vs2, vl); +} + +vuint16m1_t test_vctz_v_u16m1_mu(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, size_t vl) { + return __riscv_vctz_v_u16m1_mu(vm, vd, vs2, vl); +} + +vuint16m2_t test_vctz_v_u16m2_mu(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, size_t vl) { + return __riscv_vctz_v_u16m2_mu(vm, vd, vs2, vl); +} + +vuint16m4_t test_vctz_v_u16m4_mu(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, size_t vl) { + return __riscv_vctz_v_u16m4_mu(vm, vd, vs2, vl); +} + +vuint16m8_t test_vctz_v_u16m8_mu(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, size_t vl) { + return 
__riscv_vctz_v_u16m8_mu(vm, vd, vs2, vl); +} + +vuint32mf2_t test_vctz_v_u32mf2_mu(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) { + return __riscv_vctz_v_u32mf2_mu(vm, vd, vs2, vl); +} + +vuint32m1_t test_vctz_v_u32m1_mu(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vctz_v_u32m1_mu(vm, vd, vs2, vl); +} + +vuint32m2_t test_vctz_v_u32m2_mu(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vctz_v_u32m2_mu(vm, vd, vs2, vl); +} + +vuint32m4_t test_vctz_v_u32m4_mu(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vctz_v_u32m4_mu(vm, vd, vs2, vl); +} + +vuint32m8_t test_vctz_v_u32m8_mu(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { + return __riscv_vctz_v_u32m8_mu(vm, vd, vs2, vl); +} + +vuint64m1_t test_vctz_v_u64m1_mu(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, size_t vl) { + return __riscv_vctz_v_u64m1_mu(vm, vd, vs2, vl); +} + +vuint64m2_t test_vctz_v_u64m2_mu(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, size_t vl) { + return __riscv_vctz_v_u64m2_mu(vm, vd, vs2, vl); +} + +vuint64m4_t test_vctz_v_u64m4_mu(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, size_t vl) { + return __riscv_vctz_v_u64m4_mu(vm, vd, vs2, vl); +} + +vuint64m8_t test_vctz_v_u64m8_mu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, size_t vl) { + return __riscv_vctz_v_u64m8_mu(vm, vd, vs2, vl); +} diff --git a/auto-generated/vector-crypto/policy_funcs/intrinsic_funcs.md b/auto-generated/vector-crypto/policy_funcs/intrinsic_funcs.md index 80603a138..444080442 100644 --- a/auto-generated/vector-crypto/policy_funcs/intrinsic_funcs.md +++ b/auto-generated/vector-crypto/policy_funcs/intrinsic_funcs.md @@ -463,7 +463,287 @@ vuint64m8_t __riscv_vrev8_v_u64m8_mu (vbool8_t vm, vuint64m8_t vd, vuint64m8_t v [[policy-variant-]] ==== Vector Basic Bit-manipulation - Count Bits -Intrinsics here don't have a policy variant. 
+ +[,c] +---- +vuint8mf8_t __riscv_vclz_v_u8mf8_tu (vuint8mf8_t vd, vuint8mf8_t vs2, size_t vl); +vuint8mf4_t __riscv_vclz_v_u8mf4_tu (vuint8mf4_t vd, vuint8mf4_t vs2, size_t vl); +vuint8mf2_t __riscv_vclz_v_u8mf2_tu (vuint8mf2_t vd, vuint8mf2_t vs2, size_t vl); +vuint8m1_t __riscv_vclz_v_u8m1_tu (vuint8m1_t vd, vuint8m1_t vs2, size_t vl); +vuint8m2_t __riscv_vclz_v_u8m2_tu (vuint8m2_t vd, vuint8m2_t vs2, size_t vl); +vuint8m4_t __riscv_vclz_v_u8m4_tu (vuint8m4_t vd, vuint8m4_t vs2, size_t vl); +vuint8m8_t __riscv_vclz_v_u8m8_tu (vuint8m8_t vd, vuint8m8_t vs2, size_t vl); +vuint16mf4_t __riscv_vclz_v_u16mf4_tu (vuint16mf4_t vd, vuint16mf4_t vs2, size_t vl); +vuint16mf2_t __riscv_vclz_v_u16mf2_tu (vuint16mf2_t vd, vuint16mf2_t vs2, size_t vl); +vuint16m1_t __riscv_vclz_v_u16m1_tu (vuint16m1_t vd, vuint16m1_t vs2, size_t vl); +vuint16m2_t __riscv_vclz_v_u16m2_tu (vuint16m2_t vd, vuint16m2_t vs2, size_t vl); +vuint16m4_t __riscv_vclz_v_u16m4_tu (vuint16m4_t vd, vuint16m4_t vs2, size_t vl); +vuint16m8_t __riscv_vclz_v_u16m8_tu (vuint16m8_t vd, vuint16m8_t vs2, size_t vl); +vuint32mf2_t __riscv_vclz_v_u32mf2_tu (vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vclz_v_u32m1_tu (vuint32m1_t vd, vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vclz_v_u32m2_tu (vuint32m2_t vd, vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vclz_v_u32m4_tu (vuint32m4_t vd, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vclz_v_u32m8_tu (vuint32m8_t vd, vuint32m8_t vs2, size_t vl); +vuint64m1_t __riscv_vclz_v_u64m1_tu (vuint64m1_t vd, vuint64m1_t vs2, size_t vl); +vuint64m2_t __riscv_vclz_v_u64m2_tu (vuint64m2_t vd, vuint64m2_t vs2, size_t vl); +vuint64m4_t __riscv_vclz_v_u64m4_tu (vuint64m4_t vd, vuint64m4_t vs2, size_t vl); +vuint64m8_t __riscv_vclz_v_u64m8_tu (vuint64m8_t vd, vuint64m8_t vs2, size_t vl); +vuint8mf8_t __riscv_vctz_v_u8mf8_tu (vuint8mf8_t vd, vuint8mf8_t vs2, size_t vl); +vuint8mf4_t __riscv_vctz_v_u8mf4_tu (vuint8mf4_t vd, vuint8mf4_t vs2, size_t vl); +vuint8mf2_t __riscv_vctz_v_u8mf2_tu (vuint8mf2_t vd, vuint8mf2_t vs2, size_t vl); +vuint8m1_t __riscv_vctz_v_u8m1_tu (vuint8m1_t vd, vuint8m1_t vs2, size_t vl); +vuint8m2_t __riscv_vctz_v_u8m2_tu (vuint8m2_t vd, vuint8m2_t vs2, size_t vl); +vuint8m4_t __riscv_vctz_v_u8m4_tu (vuint8m4_t vd, vuint8m4_t vs2, size_t vl); +vuint8m8_t __riscv_vctz_v_u8m8_tu (vuint8m8_t vd, vuint8m8_t vs2, size_t vl); +vuint16mf4_t __riscv_vctz_v_u16mf4_tu (vuint16mf4_t vd, vuint16mf4_t vs2, size_t vl); +vuint16mf2_t __riscv_vctz_v_u16mf2_tu (vuint16mf2_t vd, vuint16mf2_t vs2, size_t vl); +vuint16m1_t __riscv_vctz_v_u16m1_tu (vuint16m1_t vd, vuint16m1_t vs2, size_t vl); +vuint16m2_t __riscv_vctz_v_u16m2_tu (vuint16m2_t vd, vuint16m2_t vs2, size_t vl); +vuint16m4_t __riscv_vctz_v_u16m4_tu (vuint16m4_t vd, vuint16m4_t vs2, size_t vl); +vuint16m8_t __riscv_vctz_v_u16m8_tu (vuint16m8_t vd, vuint16m8_t vs2, size_t vl); +vuint32mf2_t __riscv_vctz_v_u32mf2_tu (vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vctz_v_u32m1_tu (vuint32m1_t vd, vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vctz_v_u32m2_tu (vuint32m2_t vd, vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vctz_v_u32m4_tu (vuint32m4_t vd, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vctz_v_u32m8_tu (vuint32m8_t vd, vuint32m8_t vs2, size_t vl); +vuint64m1_t __riscv_vctz_v_u64m1_tu (vuint64m1_t vd, vuint64m1_t vs2, size_t vl); +vuint64m2_t __riscv_vctz_v_u64m2_tu (vuint64m2_t vd, vuint64m2_t vs2, size_t vl); +vuint64m4_t __riscv_vctz_v_u64m4_tu (vuint64m4_t vd, 
vuint64m4_t vs2, size_t vl); +vuint64m8_t __riscv_vctz_v_u64m8_tu (vuint64m8_t vd, vuint64m8_t vs2, size_t vl); +// masked functions +vuint8mf8_t __riscv_vclz_v_u8mf8_tum (vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, size_t vl); +vuint8mf4_t __riscv_vclz_v_u8mf4_tum (vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, size_t vl); +vuint8mf2_t __riscv_vclz_v_u8mf2_tum (vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, size_t vl); +vuint8m1_t __riscv_vclz_v_u8m1_tum (vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, size_t vl); +vuint8m2_t __riscv_vclz_v_u8m2_tum (vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, size_t vl); +vuint8m4_t __riscv_vclz_v_u8m4_tum (vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, size_t vl); +vuint8m8_t __riscv_vclz_v_u8m8_tum (vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, size_t vl); +vuint16mf4_t __riscv_vclz_v_u16mf4_tum (vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, size_t vl); +vuint16mf2_t __riscv_vclz_v_u16mf2_tum (vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, size_t vl); +vuint16m1_t __riscv_vclz_v_u16m1_tum (vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, size_t vl); +vuint16m2_t __riscv_vclz_v_u16m2_tum (vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, size_t vl); +vuint16m4_t __riscv_vclz_v_u16m4_tum (vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, size_t vl); +vuint16m8_t __riscv_vclz_v_u16m8_tum (vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, size_t vl); +vuint32mf2_t __riscv_vclz_v_u32mf2_tum (vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vclz_v_u32m1_tum (vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vclz_v_u32m2_tum (vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vclz_v_u32m4_tum (vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vclz_v_u32m8_tum (vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, size_t vl); +vuint64m1_t __riscv_vclz_v_u64m1_tum (vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, size_t vl); +vuint64m2_t __riscv_vclz_v_u64m2_tum (vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, size_t vl); +vuint64m4_t __riscv_vclz_v_u64m4_tum (vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, size_t vl); +vuint64m8_t __riscv_vclz_v_u64m8_tum (vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, size_t vl); +vuint8mf8_t __riscv_vctz_v_u8mf8_tum (vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, size_t vl); +vuint8mf4_t __riscv_vctz_v_u8mf4_tum (vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, size_t vl); +vuint8mf2_t __riscv_vctz_v_u8mf2_tum (vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, size_t vl); +vuint8m1_t __riscv_vctz_v_u8m1_tum (vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, size_t vl); +vuint8m2_t __riscv_vctz_v_u8m2_tum (vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, size_t vl); +vuint8m4_t __riscv_vctz_v_u8m4_tum (vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, size_t vl); +vuint8m8_t __riscv_vctz_v_u8m8_tum (vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, size_t vl); +vuint16mf4_t __riscv_vctz_v_u16mf4_tum (vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, size_t vl); +vuint16mf2_t __riscv_vctz_v_u16mf2_tum (vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, size_t vl); +vuint16m1_t __riscv_vctz_v_u16m1_tum (vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, size_t vl); +vuint16m2_t __riscv_vctz_v_u16m2_tum (vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, size_t vl); +vuint16m4_t __riscv_vctz_v_u16m4_tum (vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, size_t vl); +vuint16m8_t __riscv_vctz_v_u16m8_tum (vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, size_t vl); +vuint32mf2_t 
__riscv_vctz_v_u32mf2_tum (vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vctz_v_u32m1_tum (vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vctz_v_u32m2_tum (vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vctz_v_u32m4_tum (vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vctz_v_u32m8_tum (vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, size_t vl); +vuint64m1_t __riscv_vctz_v_u64m1_tum (vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, size_t vl); +vuint64m2_t __riscv_vctz_v_u64m2_tum (vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, size_t vl); +vuint64m4_t __riscv_vctz_v_u64m4_tum (vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, size_t vl); +vuint64m8_t __riscv_vctz_v_u64m8_tum (vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, size_t vl); +// masked functions +vuint8mf8_t __riscv_vclz_v_u8mf8_tumu (vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, size_t vl); +vuint8mf4_t __riscv_vclz_v_u8mf4_tumu (vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, size_t vl); +vuint8mf2_t __riscv_vclz_v_u8mf2_tumu (vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, size_t vl); +vuint8m1_t __riscv_vclz_v_u8m1_tumu (vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, size_t vl); +vuint8m2_t __riscv_vclz_v_u8m2_tumu (vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, size_t vl); +vuint8m4_t __riscv_vclz_v_u8m4_tumu (vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, size_t vl); +vuint8m8_t __riscv_vclz_v_u8m8_tumu (vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, size_t vl); +vuint16mf4_t __riscv_vclz_v_u16mf4_tumu (vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, size_t vl); +vuint16mf2_t __riscv_vclz_v_u16mf2_tumu (vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, size_t vl); +vuint16m1_t __riscv_vclz_v_u16m1_tumu (vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, size_t vl); +vuint16m2_t __riscv_vclz_v_u16m2_tumu (vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, size_t vl); +vuint16m4_t __riscv_vclz_v_u16m4_tumu (vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, size_t vl); +vuint16m8_t __riscv_vclz_v_u16m8_tumu (vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, size_t vl); +vuint32mf2_t __riscv_vclz_v_u32mf2_tumu (vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vclz_v_u32m1_tumu (vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vclz_v_u32m2_tumu (vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vclz_v_u32m4_tumu (vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vclz_v_u32m8_tumu (vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, size_t vl); +vuint64m1_t __riscv_vclz_v_u64m1_tumu (vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, size_t vl); +vuint64m2_t __riscv_vclz_v_u64m2_tumu (vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, size_t vl); +vuint64m4_t __riscv_vclz_v_u64m4_tumu (vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, size_t vl); +vuint64m8_t __riscv_vclz_v_u64m8_tumu (vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, size_t vl); +vuint8mf8_t __riscv_vctz_v_u8mf8_tumu (vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, size_t vl); +vuint8mf4_t __riscv_vctz_v_u8mf4_tumu (vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, size_t vl); +vuint8mf2_t __riscv_vctz_v_u8mf2_tumu (vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, size_t vl); +vuint8m1_t __riscv_vctz_v_u8m1_tumu (vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, size_t vl); +vuint8m2_t __riscv_vctz_v_u8m2_tumu (vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, size_t vl); +vuint8m4_t 
__riscv_vctz_v_u8m4_tumu (vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, size_t vl); +vuint8m8_t __riscv_vctz_v_u8m8_tumu (vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, size_t vl); +vuint16mf4_t __riscv_vctz_v_u16mf4_tumu (vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, size_t vl); +vuint16mf2_t __riscv_vctz_v_u16mf2_tumu (vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, size_t vl); +vuint16m1_t __riscv_vctz_v_u16m1_tumu (vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, size_t vl); +vuint16m2_t __riscv_vctz_v_u16m2_tumu (vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, size_t vl); +vuint16m4_t __riscv_vctz_v_u16m4_tumu (vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, size_t vl); +vuint16m8_t __riscv_vctz_v_u16m8_tumu (vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, size_t vl); +vuint32mf2_t __riscv_vctz_v_u32mf2_tumu (vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vctz_v_u32m1_tumu (vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vctz_v_u32m2_tumu (vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vctz_v_u32m4_tumu (vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vctz_v_u32m8_tumu (vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, size_t vl); +vuint64m1_t __riscv_vctz_v_u64m1_tumu (vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, size_t vl); +vuint64m2_t __riscv_vctz_v_u64m2_tumu (vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, size_t vl); +vuint64m4_t __riscv_vctz_v_u64m4_tumu (vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, size_t vl); +vuint64m8_t __riscv_vctz_v_u64m8_tumu (vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, size_t vl); +// masked functions +vuint8mf8_t __riscv_vclz_v_u8mf8_mu (vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, size_t vl); +vuint8mf4_t __riscv_vclz_v_u8mf4_mu (vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, size_t vl); +vuint8mf2_t __riscv_vclz_v_u8mf2_mu (vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, size_t vl); +vuint8m1_t __riscv_vclz_v_u8m1_mu (vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, size_t vl); +vuint8m2_t __riscv_vclz_v_u8m2_mu (vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, size_t vl); +vuint8m4_t __riscv_vclz_v_u8m4_mu (vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, size_t vl); +vuint8m8_t __riscv_vclz_v_u8m8_mu (vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, size_t vl); +vuint16mf4_t __riscv_vclz_v_u16mf4_mu (vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, size_t vl); +vuint16mf2_t __riscv_vclz_v_u16mf2_mu (vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, size_t vl); +vuint16m1_t __riscv_vclz_v_u16m1_mu (vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, size_t vl); +vuint16m2_t __riscv_vclz_v_u16m2_mu (vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, size_t vl); +vuint16m4_t __riscv_vclz_v_u16m4_mu (vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, size_t vl); +vuint16m8_t __riscv_vclz_v_u16m8_mu (vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, size_t vl); +vuint32mf2_t __riscv_vclz_v_u32mf2_mu (vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vclz_v_u32m1_mu (vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vclz_v_u32m2_mu (vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vclz_v_u32m4_mu (vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vclz_v_u32m8_mu (vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, size_t vl); +vuint64m1_t __riscv_vclz_v_u64m1_mu (vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, size_t vl); +vuint64m2_t __riscv_vclz_v_u64m2_mu 
(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, size_t vl); +vuint64m4_t __riscv_vclz_v_u64m4_mu (vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, size_t vl); +vuint64m8_t __riscv_vclz_v_u64m8_mu (vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, size_t vl); +vuint8mf8_t __riscv_vctz_v_u8mf8_mu (vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, size_t vl); +vuint8mf4_t __riscv_vctz_v_u8mf4_mu (vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, size_t vl); +vuint8mf2_t __riscv_vctz_v_u8mf2_mu (vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, size_t vl); +vuint8m1_t __riscv_vctz_v_u8m1_mu (vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, size_t vl); +vuint8m2_t __riscv_vctz_v_u8m2_mu (vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, size_t vl); +vuint8m4_t __riscv_vctz_v_u8m4_mu (vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, size_t vl); +vuint8m8_t __riscv_vctz_v_u8m8_mu (vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, size_t vl); +vuint16mf4_t __riscv_vctz_v_u16mf4_mu (vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, size_t vl); +vuint16mf2_t __riscv_vctz_v_u16mf2_mu (vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, size_t vl); +vuint16m1_t __riscv_vctz_v_u16m1_mu (vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, size_t vl); +vuint16m2_t __riscv_vctz_v_u16m2_mu (vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, size_t vl); +vuint16m4_t __riscv_vctz_v_u16m4_mu (vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, size_t vl); +vuint16m8_t __riscv_vctz_v_u16m8_mu (vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, size_t vl); +vuint32mf2_t __riscv_vctz_v_u32mf2_mu (vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vctz_v_u32m1_mu (vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vctz_v_u32m2_mu (vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vctz_v_u32m4_mu (vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vctz_v_u32m8_mu (vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, size_t vl); +vuint64m1_t __riscv_vctz_v_u64m1_mu (vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, size_t vl); +vuint64m2_t __riscv_vctz_v_u64m2_mu (vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, size_t vl); +vuint64m4_t __riscv_vctz_v_u64m4_mu (vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, size_t vl); +vuint64m8_t __riscv_vctz_v_u64m8_mu (vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, size_t vl); +---- + +[[policy-variant-]] +==== Vector Basic Bit-manipulation - Vector Population Count + +[,c] +---- +vuint8mf8_t __riscv_vcpop_v_u8mf8_tu (vuint8mf8_t vd, vuint8mf8_t vs2, size_t vl); +vuint8mf4_t __riscv_vcpop_v_u8mf4_tu (vuint8mf4_t vd, vuint8mf4_t vs2, size_t vl); +vuint8mf2_t __riscv_vcpop_v_u8mf2_tu (vuint8mf2_t vd, vuint8mf2_t vs2, size_t vl); +vuint8m1_t __riscv_vcpop_v_u8m1_tu (vuint8m1_t vd, vuint8m1_t vs2, size_t vl); +vuint8m2_t __riscv_vcpop_v_u8m2_tu (vuint8m2_t vd, vuint8m2_t vs2, size_t vl); +vuint8m4_t __riscv_vcpop_v_u8m4_tu (vuint8m4_t vd, vuint8m4_t vs2, size_t vl); +vuint8m8_t __riscv_vcpop_v_u8m8_tu (vuint8m8_t vd, vuint8m8_t vs2, size_t vl); +vuint16mf4_t __riscv_vcpop_v_u16mf4_tu (vuint16mf4_t vd, vuint16mf4_t vs2, size_t vl); +vuint16mf2_t __riscv_vcpop_v_u16mf2_tu (vuint16mf2_t vd, vuint16mf2_t vs2, size_t vl); +vuint16m1_t __riscv_vcpop_v_u16m1_tu (vuint16m1_t vd, vuint16m1_t vs2, size_t vl); +vuint16m2_t __riscv_vcpop_v_u16m2_tu (vuint16m2_t vd, vuint16m2_t vs2, size_t vl); +vuint16m4_t __riscv_vcpop_v_u16m4_tu (vuint16m4_t vd, vuint16m4_t vs2, size_t vl); +vuint16m8_t __riscv_vcpop_v_u16m8_tu (vuint16m8_t vd, vuint16m8_t vs2, 
size_t vl); +vuint32mf2_t __riscv_vcpop_v_u32mf2_tu (vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vcpop_v_u32m1_tu (vuint32m1_t vd, vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vcpop_v_u32m2_tu (vuint32m2_t vd, vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vcpop_v_u32m4_tu (vuint32m4_t vd, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vcpop_v_u32m8_tu (vuint32m8_t vd, vuint32m8_t vs2, size_t vl); +vuint64m1_t __riscv_vcpop_v_u64m1_tu (vuint64m1_t vd, vuint64m1_t vs2, size_t vl); +vuint64m2_t __riscv_vcpop_v_u64m2_tu (vuint64m2_t vd, vuint64m2_t vs2, size_t vl); +vuint64m4_t __riscv_vcpop_v_u64m4_tu (vuint64m4_t vd, vuint64m4_t vs2, size_t vl); +vuint64m8_t __riscv_vcpop_v_u64m8_tu (vuint64m8_t vd, vuint64m8_t vs2, size_t vl); +// masked functions +vuint8mf8_t __riscv_vcpop_v_u8mf8_tum (vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, size_t vl); +vuint8mf4_t __riscv_vcpop_v_u8mf4_tum (vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, size_t vl); +vuint8mf2_t __riscv_vcpop_v_u8mf2_tum (vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, size_t vl); +vuint8m1_t __riscv_vcpop_v_u8m1_tum (vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, size_t vl); +vuint8m2_t __riscv_vcpop_v_u8m2_tum (vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, size_t vl); +vuint8m4_t __riscv_vcpop_v_u8m4_tum (vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, size_t vl); +vuint8m8_t __riscv_vcpop_v_u8m8_tum (vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, size_t vl); +vuint16mf4_t __riscv_vcpop_v_u16mf4_tum (vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, size_t vl); +vuint16mf2_t __riscv_vcpop_v_u16mf2_tum (vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, size_t vl); +vuint16m1_t __riscv_vcpop_v_u16m1_tum (vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, size_t vl); +vuint16m2_t __riscv_vcpop_v_u16m2_tum (vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, size_t vl); +vuint16m4_t __riscv_vcpop_v_u16m4_tum (vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, size_t vl); +vuint16m8_t __riscv_vcpop_v_u16m8_tum (vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, size_t vl); +vuint32mf2_t __riscv_vcpop_v_u32mf2_tum (vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vcpop_v_u32m1_tum (vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vcpop_v_u32m2_tum (vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vcpop_v_u32m4_tum (vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vcpop_v_u32m8_tum (vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, size_t vl); +vuint64m1_t __riscv_vcpop_v_u64m1_tum (vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, size_t vl); +vuint64m2_t __riscv_vcpop_v_u64m2_tum (vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, size_t vl); +vuint64m4_t __riscv_vcpop_v_u64m4_tum (vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, size_t vl); +vuint64m8_t __riscv_vcpop_v_u64m8_tum (vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, size_t vl); +// masked functions +vuint8mf8_t __riscv_vcpop_v_u8mf8_tumu (vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, size_t vl); +vuint8mf4_t __riscv_vcpop_v_u8mf4_tumu (vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, size_t vl); +vuint8mf2_t __riscv_vcpop_v_u8mf2_tumu (vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, size_t vl); +vuint8m1_t __riscv_vcpop_v_u8m1_tumu (vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, size_t vl); +vuint8m2_t __riscv_vcpop_v_u8m2_tumu (vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, size_t vl); +vuint8m4_t __riscv_vcpop_v_u8m4_tumu (vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, 
size_t vl); +vuint8m8_t __riscv_vcpop_v_u8m8_tumu (vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, size_t vl); +vuint16mf4_t __riscv_vcpop_v_u16mf4_tumu (vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, size_t vl); +vuint16mf2_t __riscv_vcpop_v_u16mf2_tumu (vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, size_t vl); +vuint16m1_t __riscv_vcpop_v_u16m1_tumu (vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, size_t vl); +vuint16m2_t __riscv_vcpop_v_u16m2_tumu (vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, size_t vl); +vuint16m4_t __riscv_vcpop_v_u16m4_tumu (vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, size_t vl); +vuint16m8_t __riscv_vcpop_v_u16m8_tumu (vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, size_t vl); +vuint32mf2_t __riscv_vcpop_v_u32mf2_tumu (vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vcpop_v_u32m1_tumu (vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vcpop_v_u32m2_tumu (vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vcpop_v_u32m4_tumu (vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vcpop_v_u32m8_tumu (vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, size_t vl); +vuint64m1_t __riscv_vcpop_v_u64m1_tumu (vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, size_t vl); +vuint64m2_t __riscv_vcpop_v_u64m2_tumu (vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, size_t vl); +vuint64m4_t __riscv_vcpop_v_u64m4_tumu (vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, size_t vl); +vuint64m8_t __riscv_vcpop_v_u64m8_tumu (vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, size_t vl); +// masked functions +vuint8mf8_t __riscv_vcpop_v_u8mf8_mu (vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, size_t vl); +vuint8mf4_t __riscv_vcpop_v_u8mf4_mu (vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, size_t vl); +vuint8mf2_t __riscv_vcpop_v_u8mf2_mu (vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, size_t vl); +vuint8m1_t __riscv_vcpop_v_u8m1_mu (vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, size_t vl); +vuint8m2_t __riscv_vcpop_v_u8m2_mu (vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, size_t vl); +vuint8m4_t __riscv_vcpop_v_u8m4_mu (vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, size_t vl); +vuint8m8_t __riscv_vcpop_v_u8m8_mu (vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, size_t vl); +vuint16mf4_t __riscv_vcpop_v_u16mf4_mu (vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, size_t vl); +vuint16mf2_t __riscv_vcpop_v_u16mf2_mu (vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, size_t vl); +vuint16m1_t __riscv_vcpop_v_u16m1_mu (vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, size_t vl); +vuint16m2_t __riscv_vcpop_v_u16m2_mu (vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, size_t vl); +vuint16m4_t __riscv_vcpop_v_u16m4_mu (vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, size_t vl); +vuint16m8_t __riscv_vcpop_v_u16m8_mu (vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, size_t vl); +vuint32mf2_t __riscv_vcpop_v_u32mf2_mu (vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vcpop_v_u32m1_mu (vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vcpop_v_u32m2_mu (vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vcpop_v_u32m4_mu (vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vcpop_v_u32m8_mu (vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, size_t vl); +vuint64m1_t __riscv_vcpop_v_u64m1_mu (vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, size_t vl); +vuint64m2_t __riscv_vcpop_v_u64m2_mu (vbool32_t vm, vuint64m2_t vd, 
vuint64m2_t vs2, size_t vl); +vuint64m4_t __riscv_vcpop_v_u64m4_mu (vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, size_t vl); +vuint64m8_t __riscv_vcpop_v_u64m8_mu (vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, size_t vl); +---- [[policy-variant-]] ==== Vector Bit-manipulation used in Cryptography - Rotate diff --git a/auto-generated/vector-crypto/policy_funcs/intrinsic_funcs/00_zvbb_-_vector_bit-manipulation_used_in_cryptography.adoc b/auto-generated/vector-crypto/policy_funcs/intrinsic_funcs/00_zvbb_-_vector_bit-manipulation_used_in_cryptography.adoc index b44053593..4433b14fb 100644 --- a/auto-generated/vector-crypto/policy_funcs/intrinsic_funcs/00_zvbb_-_vector_bit-manipulation_used_in_cryptography.adoc +++ b/auto-generated/vector-crypto/policy_funcs/intrinsic_funcs/00_zvbb_-_vector_bit-manipulation_used_in_cryptography.adoc @@ -463,7 +463,287 @@ vuint64m8_t __riscv_vrev8_v_u64m8_mu (vbool8_t vm, vuint64m8_t vd, vuint64m8_t v [[policy-variant-]] ==== Vector Basic Bit-manipulation - Count Bits -Intrinsics here don't have a policy variant. + +[,c] +---- +vuint8mf8_t __riscv_vclz_v_u8mf8_tu (vuint8mf8_t vd, vuint8mf8_t vs2, size_t vl); +vuint8mf4_t __riscv_vclz_v_u8mf4_tu (vuint8mf4_t vd, vuint8mf4_t vs2, size_t vl); +vuint8mf2_t __riscv_vclz_v_u8mf2_tu (vuint8mf2_t vd, vuint8mf2_t vs2, size_t vl); +vuint8m1_t __riscv_vclz_v_u8m1_tu (vuint8m1_t vd, vuint8m1_t vs2, size_t vl); +vuint8m2_t __riscv_vclz_v_u8m2_tu (vuint8m2_t vd, vuint8m2_t vs2, size_t vl); +vuint8m4_t __riscv_vclz_v_u8m4_tu (vuint8m4_t vd, vuint8m4_t vs2, size_t vl); +vuint8m8_t __riscv_vclz_v_u8m8_tu (vuint8m8_t vd, vuint8m8_t vs2, size_t vl); +vuint16mf4_t __riscv_vclz_v_u16mf4_tu (vuint16mf4_t vd, vuint16mf4_t vs2, size_t vl); +vuint16mf2_t __riscv_vclz_v_u16mf2_tu (vuint16mf2_t vd, vuint16mf2_t vs2, size_t vl); +vuint16m1_t __riscv_vclz_v_u16m1_tu (vuint16m1_t vd, vuint16m1_t vs2, size_t vl); +vuint16m2_t __riscv_vclz_v_u16m2_tu (vuint16m2_t vd, vuint16m2_t vs2, size_t vl); +vuint16m4_t __riscv_vclz_v_u16m4_tu (vuint16m4_t vd, vuint16m4_t vs2, size_t vl); +vuint16m8_t __riscv_vclz_v_u16m8_tu (vuint16m8_t vd, vuint16m8_t vs2, size_t vl); +vuint32mf2_t __riscv_vclz_v_u32mf2_tu (vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vclz_v_u32m1_tu (vuint32m1_t vd, vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vclz_v_u32m2_tu (vuint32m2_t vd, vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vclz_v_u32m4_tu (vuint32m4_t vd, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vclz_v_u32m8_tu (vuint32m8_t vd, vuint32m8_t vs2, size_t vl); +vuint64m1_t __riscv_vclz_v_u64m1_tu (vuint64m1_t vd, vuint64m1_t vs2, size_t vl); +vuint64m2_t __riscv_vclz_v_u64m2_tu (vuint64m2_t vd, vuint64m2_t vs2, size_t vl); +vuint64m4_t __riscv_vclz_v_u64m4_tu (vuint64m4_t vd, vuint64m4_t vs2, size_t vl); +vuint64m8_t __riscv_vclz_v_u64m8_tu (vuint64m8_t vd, vuint64m8_t vs2, size_t vl); +vuint8mf8_t __riscv_vctz_v_u8mf8_tu (vuint8mf8_t vd, vuint8mf8_t vs2, size_t vl); +vuint8mf4_t __riscv_vctz_v_u8mf4_tu (vuint8mf4_t vd, vuint8mf4_t vs2, size_t vl); +vuint8mf2_t __riscv_vctz_v_u8mf2_tu (vuint8mf2_t vd, vuint8mf2_t vs2, size_t vl); +vuint8m1_t __riscv_vctz_v_u8m1_tu (vuint8m1_t vd, vuint8m1_t vs2, size_t vl); +vuint8m2_t __riscv_vctz_v_u8m2_tu (vuint8m2_t vd, vuint8m2_t vs2, size_t vl); +vuint8m4_t __riscv_vctz_v_u8m4_tu (vuint8m4_t vd, vuint8m4_t vs2, size_t vl); +vuint8m8_t __riscv_vctz_v_u8m8_tu (vuint8m8_t vd, vuint8m8_t vs2, size_t vl); +vuint16mf4_t __riscv_vctz_v_u16mf4_tu (vuint16mf4_t vd, vuint16mf4_t vs2, size_t vl); 
+vuint16mf2_t __riscv_vctz_v_u16mf2_tu (vuint16mf2_t vd, vuint16mf2_t vs2, size_t vl); +vuint16m1_t __riscv_vctz_v_u16m1_tu (vuint16m1_t vd, vuint16m1_t vs2, size_t vl); +vuint16m2_t __riscv_vctz_v_u16m2_tu (vuint16m2_t vd, vuint16m2_t vs2, size_t vl); +vuint16m4_t __riscv_vctz_v_u16m4_tu (vuint16m4_t vd, vuint16m4_t vs2, size_t vl); +vuint16m8_t __riscv_vctz_v_u16m8_tu (vuint16m8_t vd, vuint16m8_t vs2, size_t vl); +vuint32mf2_t __riscv_vctz_v_u32mf2_tu (vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vctz_v_u32m1_tu (vuint32m1_t vd, vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vctz_v_u32m2_tu (vuint32m2_t vd, vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vctz_v_u32m4_tu (vuint32m4_t vd, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vctz_v_u32m8_tu (vuint32m8_t vd, vuint32m8_t vs2, size_t vl); +vuint64m1_t __riscv_vctz_v_u64m1_tu (vuint64m1_t vd, vuint64m1_t vs2, size_t vl); +vuint64m2_t __riscv_vctz_v_u64m2_tu (vuint64m2_t vd, vuint64m2_t vs2, size_t vl); +vuint64m4_t __riscv_vctz_v_u64m4_tu (vuint64m4_t vd, vuint64m4_t vs2, size_t vl); +vuint64m8_t __riscv_vctz_v_u64m8_tu (vuint64m8_t vd, vuint64m8_t vs2, size_t vl); +// masked functions +vuint8mf8_t __riscv_vclz_v_u8mf8_tum (vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, size_t vl); +vuint8mf4_t __riscv_vclz_v_u8mf4_tum (vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, size_t vl); +vuint8mf2_t __riscv_vclz_v_u8mf2_tum (vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, size_t vl); +vuint8m1_t __riscv_vclz_v_u8m1_tum (vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, size_t vl); +vuint8m2_t __riscv_vclz_v_u8m2_tum (vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, size_t vl); +vuint8m4_t __riscv_vclz_v_u8m4_tum (vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, size_t vl); +vuint8m8_t __riscv_vclz_v_u8m8_tum (vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, size_t vl); +vuint16mf4_t __riscv_vclz_v_u16mf4_tum (vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, size_t vl); +vuint16mf2_t __riscv_vclz_v_u16mf2_tum (vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, size_t vl); +vuint16m1_t __riscv_vclz_v_u16m1_tum (vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, size_t vl); +vuint16m2_t __riscv_vclz_v_u16m2_tum (vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, size_t vl); +vuint16m4_t __riscv_vclz_v_u16m4_tum (vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, size_t vl); +vuint16m8_t __riscv_vclz_v_u16m8_tum (vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, size_t vl); +vuint32mf2_t __riscv_vclz_v_u32mf2_tum (vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vclz_v_u32m1_tum (vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vclz_v_u32m2_tum (vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vclz_v_u32m4_tum (vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vclz_v_u32m8_tum (vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, size_t vl); +vuint64m1_t __riscv_vclz_v_u64m1_tum (vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, size_t vl); +vuint64m2_t __riscv_vclz_v_u64m2_tum (vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, size_t vl); +vuint64m4_t __riscv_vclz_v_u64m4_tum (vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, size_t vl); +vuint64m8_t __riscv_vclz_v_u64m8_tum (vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, size_t vl); +vuint8mf8_t __riscv_vctz_v_u8mf8_tum (vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, size_t vl); +vuint8mf4_t __riscv_vctz_v_u8mf4_tum (vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, size_t vl); +vuint8mf2_t 
__riscv_vctz_v_u8mf2_tum (vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, size_t vl); +vuint8m1_t __riscv_vctz_v_u8m1_tum (vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, size_t vl); +vuint8m2_t __riscv_vctz_v_u8m2_tum (vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, size_t vl); +vuint8m4_t __riscv_vctz_v_u8m4_tum (vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, size_t vl); +vuint8m8_t __riscv_vctz_v_u8m8_tum (vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, size_t vl); +vuint16mf4_t __riscv_vctz_v_u16mf4_tum (vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, size_t vl); +vuint16mf2_t __riscv_vctz_v_u16mf2_tum (vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, size_t vl); +vuint16m1_t __riscv_vctz_v_u16m1_tum (vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, size_t vl); +vuint16m2_t __riscv_vctz_v_u16m2_tum (vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, size_t vl); +vuint16m4_t __riscv_vctz_v_u16m4_tum (vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, size_t vl); +vuint16m8_t __riscv_vctz_v_u16m8_tum (vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, size_t vl); +vuint32mf2_t __riscv_vctz_v_u32mf2_tum (vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vctz_v_u32m1_tum (vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vctz_v_u32m2_tum (vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vctz_v_u32m4_tum (vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vctz_v_u32m8_tum (vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, size_t vl); +vuint64m1_t __riscv_vctz_v_u64m1_tum (vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, size_t vl); +vuint64m2_t __riscv_vctz_v_u64m2_tum (vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, size_t vl); +vuint64m4_t __riscv_vctz_v_u64m4_tum (vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, size_t vl); +vuint64m8_t __riscv_vctz_v_u64m8_tum (vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, size_t vl); +// masked functions +vuint8mf8_t __riscv_vclz_v_u8mf8_tumu (vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, size_t vl); +vuint8mf4_t __riscv_vclz_v_u8mf4_tumu (vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, size_t vl); +vuint8mf2_t __riscv_vclz_v_u8mf2_tumu (vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, size_t vl); +vuint8m1_t __riscv_vclz_v_u8m1_tumu (vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, size_t vl); +vuint8m2_t __riscv_vclz_v_u8m2_tumu (vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, size_t vl); +vuint8m4_t __riscv_vclz_v_u8m4_tumu (vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, size_t vl); +vuint8m8_t __riscv_vclz_v_u8m8_tumu (vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, size_t vl); +vuint16mf4_t __riscv_vclz_v_u16mf4_tumu (vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, size_t vl); +vuint16mf2_t __riscv_vclz_v_u16mf2_tumu (vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, size_t vl); +vuint16m1_t __riscv_vclz_v_u16m1_tumu (vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, size_t vl); +vuint16m2_t __riscv_vclz_v_u16m2_tumu (vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, size_t vl); +vuint16m4_t __riscv_vclz_v_u16m4_tumu (vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, size_t vl); +vuint16m8_t __riscv_vclz_v_u16m8_tumu (vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, size_t vl); +vuint32mf2_t __riscv_vclz_v_u32mf2_tumu (vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vclz_v_u32m1_tumu (vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vclz_v_u32m2_tumu (vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, size_t vl); +vuint32m4_t 
__riscv_vclz_v_u32m4_tumu (vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vclz_v_u32m8_tumu (vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, size_t vl); +vuint64m1_t __riscv_vclz_v_u64m1_tumu (vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, size_t vl); +vuint64m2_t __riscv_vclz_v_u64m2_tumu (vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, size_t vl); +vuint64m4_t __riscv_vclz_v_u64m4_tumu (vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, size_t vl); +vuint64m8_t __riscv_vclz_v_u64m8_tumu (vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, size_t vl); +vuint8mf8_t __riscv_vctz_v_u8mf8_tumu (vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, size_t vl); +vuint8mf4_t __riscv_vctz_v_u8mf4_tumu (vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, size_t vl); +vuint8mf2_t __riscv_vctz_v_u8mf2_tumu (vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, size_t vl); +vuint8m1_t __riscv_vctz_v_u8m1_tumu (vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, size_t vl); +vuint8m2_t __riscv_vctz_v_u8m2_tumu (vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, size_t vl); +vuint8m4_t __riscv_vctz_v_u8m4_tumu (vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, size_t vl); +vuint8m8_t __riscv_vctz_v_u8m8_tumu (vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, size_t vl); +vuint16mf4_t __riscv_vctz_v_u16mf4_tumu (vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, size_t vl); +vuint16mf2_t __riscv_vctz_v_u16mf2_tumu (vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, size_t vl); +vuint16m1_t __riscv_vctz_v_u16m1_tumu (vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, size_t vl); +vuint16m2_t __riscv_vctz_v_u16m2_tumu (vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, size_t vl); +vuint16m4_t __riscv_vctz_v_u16m4_tumu (vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, size_t vl); +vuint16m8_t __riscv_vctz_v_u16m8_tumu (vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, size_t vl); +vuint32mf2_t __riscv_vctz_v_u32mf2_tumu (vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vctz_v_u32m1_tumu (vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vctz_v_u32m2_tumu (vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vctz_v_u32m4_tumu (vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vctz_v_u32m8_tumu (vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, size_t vl); +vuint64m1_t __riscv_vctz_v_u64m1_tumu (vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, size_t vl); +vuint64m2_t __riscv_vctz_v_u64m2_tumu (vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, size_t vl); +vuint64m4_t __riscv_vctz_v_u64m4_tumu (vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, size_t vl); +vuint64m8_t __riscv_vctz_v_u64m8_tumu (vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, size_t vl); +// masked functions +vuint8mf8_t __riscv_vclz_v_u8mf8_mu (vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, size_t vl); +vuint8mf4_t __riscv_vclz_v_u8mf4_mu (vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, size_t vl); +vuint8mf2_t __riscv_vclz_v_u8mf2_mu (vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, size_t vl); +vuint8m1_t __riscv_vclz_v_u8m1_mu (vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, size_t vl); +vuint8m2_t __riscv_vclz_v_u8m2_mu (vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, size_t vl); +vuint8m4_t __riscv_vclz_v_u8m4_mu (vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, size_t vl); +vuint8m8_t __riscv_vclz_v_u8m8_mu (vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, size_t vl); +vuint16mf4_t __riscv_vclz_v_u16mf4_mu (vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, size_t vl); +vuint16mf2_t 
__riscv_vclz_v_u16mf2_mu (vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, size_t vl); +vuint16m1_t __riscv_vclz_v_u16m1_mu (vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, size_t vl); +vuint16m2_t __riscv_vclz_v_u16m2_mu (vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, size_t vl); +vuint16m4_t __riscv_vclz_v_u16m4_mu (vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, size_t vl); +vuint16m8_t __riscv_vclz_v_u16m8_mu (vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, size_t vl); +vuint32mf2_t __riscv_vclz_v_u32mf2_mu (vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vclz_v_u32m1_mu (vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vclz_v_u32m2_mu (vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vclz_v_u32m4_mu (vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vclz_v_u32m8_mu (vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, size_t vl); +vuint64m1_t __riscv_vclz_v_u64m1_mu (vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, size_t vl); +vuint64m2_t __riscv_vclz_v_u64m2_mu (vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, size_t vl); +vuint64m4_t __riscv_vclz_v_u64m4_mu (vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, size_t vl); +vuint64m8_t __riscv_vclz_v_u64m8_mu (vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, size_t vl); +vuint8mf8_t __riscv_vctz_v_u8mf8_mu (vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, size_t vl); +vuint8mf4_t __riscv_vctz_v_u8mf4_mu (vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, size_t vl); +vuint8mf2_t __riscv_vctz_v_u8mf2_mu (vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, size_t vl); +vuint8m1_t __riscv_vctz_v_u8m1_mu (vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, size_t vl); +vuint8m2_t __riscv_vctz_v_u8m2_mu (vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, size_t vl); +vuint8m4_t __riscv_vctz_v_u8m4_mu (vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, size_t vl); +vuint8m8_t __riscv_vctz_v_u8m8_mu (vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, size_t vl); +vuint16mf4_t __riscv_vctz_v_u16mf4_mu (vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, size_t vl); +vuint16mf2_t __riscv_vctz_v_u16mf2_mu (vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, size_t vl); +vuint16m1_t __riscv_vctz_v_u16m1_mu (vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, size_t vl); +vuint16m2_t __riscv_vctz_v_u16m2_mu (vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, size_t vl); +vuint16m4_t __riscv_vctz_v_u16m4_mu (vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, size_t vl); +vuint16m8_t __riscv_vctz_v_u16m8_mu (vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, size_t vl); +vuint32mf2_t __riscv_vctz_v_u32mf2_mu (vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vctz_v_u32m1_mu (vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vctz_v_u32m2_mu (vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vctz_v_u32m4_mu (vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vctz_v_u32m8_mu (vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, size_t vl); +vuint64m1_t __riscv_vctz_v_u64m1_mu (vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, size_t vl); +vuint64m2_t __riscv_vctz_v_u64m2_mu (vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, size_t vl); +vuint64m4_t __riscv_vctz_v_u64m4_mu (vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, size_t vl); +vuint64m8_t __riscv_vctz_v_u64m8_mu (vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, size_t vl); +---- + +[[policy-variant-]] +==== Vector Basic Bit-manipulation - Vector Population 
Count + +[,c] +---- +vuint8mf8_t __riscv_vcpop_v_u8mf8_tu (vuint8mf8_t vd, vuint8mf8_t vs2, size_t vl); +vuint8mf4_t __riscv_vcpop_v_u8mf4_tu (vuint8mf4_t vd, vuint8mf4_t vs2, size_t vl); +vuint8mf2_t __riscv_vcpop_v_u8mf2_tu (vuint8mf2_t vd, vuint8mf2_t vs2, size_t vl); +vuint8m1_t __riscv_vcpop_v_u8m1_tu (vuint8m1_t vd, vuint8m1_t vs2, size_t vl); +vuint8m2_t __riscv_vcpop_v_u8m2_tu (vuint8m2_t vd, vuint8m2_t vs2, size_t vl); +vuint8m4_t __riscv_vcpop_v_u8m4_tu (vuint8m4_t vd, vuint8m4_t vs2, size_t vl); +vuint8m8_t __riscv_vcpop_v_u8m8_tu (vuint8m8_t vd, vuint8m8_t vs2, size_t vl); +vuint16mf4_t __riscv_vcpop_v_u16mf4_tu (vuint16mf4_t vd, vuint16mf4_t vs2, size_t vl); +vuint16mf2_t __riscv_vcpop_v_u16mf2_tu (vuint16mf2_t vd, vuint16mf2_t vs2, size_t vl); +vuint16m1_t __riscv_vcpop_v_u16m1_tu (vuint16m1_t vd, vuint16m1_t vs2, size_t vl); +vuint16m2_t __riscv_vcpop_v_u16m2_tu (vuint16m2_t vd, vuint16m2_t vs2, size_t vl); +vuint16m4_t __riscv_vcpop_v_u16m4_tu (vuint16m4_t vd, vuint16m4_t vs2, size_t vl); +vuint16m8_t __riscv_vcpop_v_u16m8_tu (vuint16m8_t vd, vuint16m8_t vs2, size_t vl); +vuint32mf2_t __riscv_vcpop_v_u32mf2_tu (vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vcpop_v_u32m1_tu (vuint32m1_t vd, vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vcpop_v_u32m2_tu (vuint32m2_t vd, vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vcpop_v_u32m4_tu (vuint32m4_t vd, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vcpop_v_u32m8_tu (vuint32m8_t vd, vuint32m8_t vs2, size_t vl); +vuint64m1_t __riscv_vcpop_v_u64m1_tu (vuint64m1_t vd, vuint64m1_t vs2, size_t vl); +vuint64m2_t __riscv_vcpop_v_u64m2_tu (vuint64m2_t vd, vuint64m2_t vs2, size_t vl); +vuint64m4_t __riscv_vcpop_v_u64m4_tu (vuint64m4_t vd, vuint64m4_t vs2, size_t vl); +vuint64m8_t __riscv_vcpop_v_u64m8_tu (vuint64m8_t vd, vuint64m8_t vs2, size_t vl); +// masked functions +vuint8mf8_t __riscv_vcpop_v_u8mf8_tum (vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, size_t vl); +vuint8mf4_t __riscv_vcpop_v_u8mf4_tum (vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, size_t vl); +vuint8mf2_t __riscv_vcpop_v_u8mf2_tum (vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, size_t vl); +vuint8m1_t __riscv_vcpop_v_u8m1_tum (vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, size_t vl); +vuint8m2_t __riscv_vcpop_v_u8m2_tum (vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, size_t vl); +vuint8m4_t __riscv_vcpop_v_u8m4_tum (vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, size_t vl); +vuint8m8_t __riscv_vcpop_v_u8m8_tum (vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, size_t vl); +vuint16mf4_t __riscv_vcpop_v_u16mf4_tum (vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, size_t vl); +vuint16mf2_t __riscv_vcpop_v_u16mf2_tum (vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, size_t vl); +vuint16m1_t __riscv_vcpop_v_u16m1_tum (vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, size_t vl); +vuint16m2_t __riscv_vcpop_v_u16m2_tum (vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, size_t vl); +vuint16m4_t __riscv_vcpop_v_u16m4_tum (vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, size_t vl); +vuint16m8_t __riscv_vcpop_v_u16m8_tum (vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, size_t vl); +vuint32mf2_t __riscv_vcpop_v_u32mf2_tum (vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vcpop_v_u32m1_tum (vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vcpop_v_u32m2_tum (vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vcpop_v_u32m4_tum (vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, size_t 
vl); +vuint32m8_t __riscv_vcpop_v_u32m8_tum (vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, size_t vl); +vuint64m1_t __riscv_vcpop_v_u64m1_tum (vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, size_t vl); +vuint64m2_t __riscv_vcpop_v_u64m2_tum (vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, size_t vl); +vuint64m4_t __riscv_vcpop_v_u64m4_tum (vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, size_t vl); +vuint64m8_t __riscv_vcpop_v_u64m8_tum (vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, size_t vl); +// masked functions +vuint8mf8_t __riscv_vcpop_v_u8mf8_tumu (vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, size_t vl); +vuint8mf4_t __riscv_vcpop_v_u8mf4_tumu (vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, size_t vl); +vuint8mf2_t __riscv_vcpop_v_u8mf2_tumu (vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, size_t vl); +vuint8m1_t __riscv_vcpop_v_u8m1_tumu (vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, size_t vl); +vuint8m2_t __riscv_vcpop_v_u8m2_tumu (vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, size_t vl); +vuint8m4_t __riscv_vcpop_v_u8m4_tumu (vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, size_t vl); +vuint8m8_t __riscv_vcpop_v_u8m8_tumu (vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, size_t vl); +vuint16mf4_t __riscv_vcpop_v_u16mf4_tumu (vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, size_t vl); +vuint16mf2_t __riscv_vcpop_v_u16mf2_tumu (vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, size_t vl); +vuint16m1_t __riscv_vcpop_v_u16m1_tumu (vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, size_t vl); +vuint16m2_t __riscv_vcpop_v_u16m2_tumu (vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, size_t vl); +vuint16m4_t __riscv_vcpop_v_u16m4_tumu (vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, size_t vl); +vuint16m8_t __riscv_vcpop_v_u16m8_tumu (vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, size_t vl); +vuint32mf2_t __riscv_vcpop_v_u32mf2_tumu (vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vcpop_v_u32m1_tumu (vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vcpop_v_u32m2_tumu (vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vcpop_v_u32m4_tumu (vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vcpop_v_u32m8_tumu (vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, size_t vl); +vuint64m1_t __riscv_vcpop_v_u64m1_tumu (vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, size_t vl); +vuint64m2_t __riscv_vcpop_v_u64m2_tumu (vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, size_t vl); +vuint64m4_t __riscv_vcpop_v_u64m4_tumu (vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, size_t vl); +vuint64m8_t __riscv_vcpop_v_u64m8_tumu (vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, size_t vl); +// masked functions +vuint8mf8_t __riscv_vcpop_v_u8mf8_mu (vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, size_t vl); +vuint8mf4_t __riscv_vcpop_v_u8mf4_mu (vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, size_t vl); +vuint8mf2_t __riscv_vcpop_v_u8mf2_mu (vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, size_t vl); +vuint8m1_t __riscv_vcpop_v_u8m1_mu (vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, size_t vl); +vuint8m2_t __riscv_vcpop_v_u8m2_mu (vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, size_t vl); +vuint8m4_t __riscv_vcpop_v_u8m4_mu (vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, size_t vl); +vuint8m8_t __riscv_vcpop_v_u8m8_mu (vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, size_t vl); +vuint16mf4_t __riscv_vcpop_v_u16mf4_mu (vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, size_t vl); +vuint16mf2_t __riscv_vcpop_v_u16mf2_mu (vbool32_t vm, 
vuint16mf2_t vd, vuint16mf2_t vs2, size_t vl); +vuint16m1_t __riscv_vcpop_v_u16m1_mu (vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, size_t vl); +vuint16m2_t __riscv_vcpop_v_u16m2_mu (vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, size_t vl); +vuint16m4_t __riscv_vcpop_v_u16m4_mu (vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, size_t vl); +vuint16m8_t __riscv_vcpop_v_u16m8_mu (vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, size_t vl); +vuint32mf2_t __riscv_vcpop_v_u32mf2_mu (vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vcpop_v_u32m1_mu (vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vcpop_v_u32m2_mu (vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vcpop_v_u32m4_mu (vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vcpop_v_u32m8_mu (vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, size_t vl); +vuint64m1_t __riscv_vcpop_v_u64m1_mu (vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, size_t vl); +vuint64m2_t __riscv_vcpop_v_u64m2_mu (vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, size_t vl); +vuint64m4_t __riscv_vcpop_v_u64m4_mu (vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, size_t vl); +vuint64m8_t __riscv_vcpop_v_u64m8_mu (vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, size_t vl); +---- [[policy-variant-]] ==== Vector Bit-manipulation used in Cryptography - Rotate diff --git a/auto-generated/vector-crypto/policy_funcs/llvm-api-tests/vclz.c b/auto-generated/vector-crypto/policy_funcs/llvm-api-tests/vclz.c new file mode 100644 index 000000000..d9c132cd7 --- /dev/null +++ b/auto-generated/vector-crypto/policy_funcs/llvm-api-tests/vclz.c @@ -0,0 +1,365 @@ +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zvl512b \ +// RUN: -target-feature +zvbb \ +// RUN: -target-feature +zvbc \ +// RUN: -target-feature +zvkg \ +// RUN: -target-feature +zvkned \ +// RUN: -target-feature +zvknhb \ +// RUN: -target-feature +zvksed \ +// RUN: -target-feature +zvksh -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include + +vuint8mf8_t test_vclz_v_u8mf8_tu(vuint8mf8_t vd, vuint8mf8_t vs2, size_t vl) { + return __riscv_vclz_v_u8mf8_tu(vd, vs2, vl); +} + +vuint8mf4_t test_vclz_v_u8mf4_tu(vuint8mf4_t vd, vuint8mf4_t vs2, size_t vl) { + return __riscv_vclz_v_u8mf4_tu(vd, vs2, vl); +} + +vuint8mf2_t test_vclz_v_u8mf2_tu(vuint8mf2_t vd, vuint8mf2_t vs2, size_t vl) { + return __riscv_vclz_v_u8mf2_tu(vd, vs2, vl); +} + +vuint8m1_t test_vclz_v_u8m1_tu(vuint8m1_t vd, vuint8m1_t vs2, size_t vl) { + return __riscv_vclz_v_u8m1_tu(vd, vs2, vl); +} + +vuint8m2_t test_vclz_v_u8m2_tu(vuint8m2_t vd, vuint8m2_t vs2, size_t vl) { + return __riscv_vclz_v_u8m2_tu(vd, vs2, vl); +} + +vuint8m4_t test_vclz_v_u8m4_tu(vuint8m4_t vd, vuint8m4_t vs2, size_t vl) { + return __riscv_vclz_v_u8m4_tu(vd, vs2, vl); +} + +vuint8m8_t test_vclz_v_u8m8_tu(vuint8m8_t vd, vuint8m8_t vs2, size_t vl) { + return __riscv_vclz_v_u8m8_tu(vd, vs2, vl); +} + +vuint16mf4_t test_vclz_v_u16mf4_tu(vuint16mf4_t vd, vuint16mf4_t vs2, size_t vl) { + return __riscv_vclz_v_u16mf4_tu(vd, vs2, vl); +} + +vuint16mf2_t test_vclz_v_u16mf2_tu(vuint16mf2_t vd, vuint16mf2_t vs2, size_t vl) { + return __riscv_vclz_v_u16mf2_tu(vd, vs2, vl); +} + +vuint16m1_t test_vclz_v_u16m1_tu(vuint16m1_t vd, vuint16m1_t vs2, size_t vl) { + return __riscv_vclz_v_u16m1_tu(vd, vs2, vl); +} + +vuint16m2_t test_vclz_v_u16m2_tu(vuint16m2_t vd, 
vuint16m2_t vs2, size_t vl) { + return __riscv_vclz_v_u16m2_tu(vd, vs2, vl); +} + +vuint16m4_t test_vclz_v_u16m4_tu(vuint16m4_t vd, vuint16m4_t vs2, size_t vl) { + return __riscv_vclz_v_u16m4_tu(vd, vs2, vl); +} + +vuint16m8_t test_vclz_v_u16m8_tu(vuint16m8_t vd, vuint16m8_t vs2, size_t vl) { + return __riscv_vclz_v_u16m8_tu(vd, vs2, vl); +} + +vuint32mf2_t test_vclz_v_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) { + return __riscv_vclz_v_u32mf2_tu(vd, vs2, vl); +} + +vuint32m1_t test_vclz_v_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vclz_v_u32m1_tu(vd, vs2, vl); +} + +vuint32m2_t test_vclz_v_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vclz_v_u32m2_tu(vd, vs2, vl); +} + +vuint32m4_t test_vclz_v_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vclz_v_u32m4_tu(vd, vs2, vl); +} + +vuint32m8_t test_vclz_v_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { + return __riscv_vclz_v_u32m8_tu(vd, vs2, vl); +} + +vuint64m1_t test_vclz_v_u64m1_tu(vuint64m1_t vd, vuint64m1_t vs2, size_t vl) { + return __riscv_vclz_v_u64m1_tu(vd, vs2, vl); +} + +vuint64m2_t test_vclz_v_u64m2_tu(vuint64m2_t vd, vuint64m2_t vs2, size_t vl) { + return __riscv_vclz_v_u64m2_tu(vd, vs2, vl); +} + +vuint64m4_t test_vclz_v_u64m4_tu(vuint64m4_t vd, vuint64m4_t vs2, size_t vl) { + return __riscv_vclz_v_u64m4_tu(vd, vs2, vl); +} + +vuint64m8_t test_vclz_v_u64m8_tu(vuint64m8_t vd, vuint64m8_t vs2, size_t vl) { + return __riscv_vclz_v_u64m8_tu(vd, vs2, vl); +} + +vuint8mf8_t test_vclz_v_u8mf8_tum(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, size_t vl) { + return __riscv_vclz_v_u8mf8_tum(vm, vd, vs2, vl); +} + +vuint8mf4_t test_vclz_v_u8mf4_tum(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, size_t vl) { + return __riscv_vclz_v_u8mf4_tum(vm, vd, vs2, vl); +} + +vuint8mf2_t test_vclz_v_u8mf2_tum(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, size_t vl) { + return __riscv_vclz_v_u8mf2_tum(vm, vd, vs2, vl); +} + +vuint8m1_t test_vclz_v_u8m1_tum(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, size_t vl) { + return __riscv_vclz_v_u8m1_tum(vm, vd, vs2, vl); +} + +vuint8m2_t test_vclz_v_u8m2_tum(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, size_t vl) { + return __riscv_vclz_v_u8m2_tum(vm, vd, vs2, vl); +} + +vuint8m4_t test_vclz_v_u8m4_tum(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, size_t vl) { + return __riscv_vclz_v_u8m4_tum(vm, vd, vs2, vl); +} + +vuint8m8_t test_vclz_v_u8m8_tum(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, size_t vl) { + return __riscv_vclz_v_u8m8_tum(vm, vd, vs2, vl); +} + +vuint16mf4_t test_vclz_v_u16mf4_tum(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, size_t vl) { + return __riscv_vclz_v_u16mf4_tum(vm, vd, vs2, vl); +} + +vuint16mf2_t test_vclz_v_u16mf2_tum(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, size_t vl) { + return __riscv_vclz_v_u16mf2_tum(vm, vd, vs2, vl); +} + +vuint16m1_t test_vclz_v_u16m1_tum(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, size_t vl) { + return __riscv_vclz_v_u16m1_tum(vm, vd, vs2, vl); +} + +vuint16m2_t test_vclz_v_u16m2_tum(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, size_t vl) { + return __riscv_vclz_v_u16m2_tum(vm, vd, vs2, vl); +} + +vuint16m4_t test_vclz_v_u16m4_tum(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, size_t vl) { + return __riscv_vclz_v_u16m4_tum(vm, vd, vs2, vl); +} + +vuint16m8_t test_vclz_v_u16m8_tum(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, size_t vl) { + return __riscv_vclz_v_u16m8_tum(vm, vd, vs2, vl); +} + +vuint32mf2_t test_vclz_v_u32mf2_tum(vbool64_t vm, 
vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) { + return __riscv_vclz_v_u32mf2_tum(vm, vd, vs2, vl); +} + +vuint32m1_t test_vclz_v_u32m1_tum(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vclz_v_u32m1_tum(vm, vd, vs2, vl); +} + +vuint32m2_t test_vclz_v_u32m2_tum(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vclz_v_u32m2_tum(vm, vd, vs2, vl); +} + +vuint32m4_t test_vclz_v_u32m4_tum(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vclz_v_u32m4_tum(vm, vd, vs2, vl); +} + +vuint32m8_t test_vclz_v_u32m8_tum(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { + return __riscv_vclz_v_u32m8_tum(vm, vd, vs2, vl); +} + +vuint64m1_t test_vclz_v_u64m1_tum(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, size_t vl) { + return __riscv_vclz_v_u64m1_tum(vm, vd, vs2, vl); +} + +vuint64m2_t test_vclz_v_u64m2_tum(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, size_t vl) { + return __riscv_vclz_v_u64m2_tum(vm, vd, vs2, vl); +} + +vuint64m4_t test_vclz_v_u64m4_tum(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, size_t vl) { + return __riscv_vclz_v_u64m4_tum(vm, vd, vs2, vl); +} + +vuint64m8_t test_vclz_v_u64m8_tum(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, size_t vl) { + return __riscv_vclz_v_u64m8_tum(vm, vd, vs2, vl); +} + +vuint8mf8_t test_vclz_v_u8mf8_tumu(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, size_t vl) { + return __riscv_vclz_v_u8mf8_tumu(vm, vd, vs2, vl); +} + +vuint8mf4_t test_vclz_v_u8mf4_tumu(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, size_t vl) { + return __riscv_vclz_v_u8mf4_tumu(vm, vd, vs2, vl); +} + +vuint8mf2_t test_vclz_v_u8mf2_tumu(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, size_t vl) { + return __riscv_vclz_v_u8mf2_tumu(vm, vd, vs2, vl); +} + +vuint8m1_t test_vclz_v_u8m1_tumu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, size_t vl) { + return __riscv_vclz_v_u8m1_tumu(vm, vd, vs2, vl); +} + +vuint8m2_t test_vclz_v_u8m2_tumu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, size_t vl) { + return __riscv_vclz_v_u8m2_tumu(vm, vd, vs2, vl); +} + +vuint8m4_t test_vclz_v_u8m4_tumu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, size_t vl) { + return __riscv_vclz_v_u8m4_tumu(vm, vd, vs2, vl); +} + +vuint8m8_t test_vclz_v_u8m8_tumu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, size_t vl) { + return __riscv_vclz_v_u8m8_tumu(vm, vd, vs2, vl); +} + +vuint16mf4_t test_vclz_v_u16mf4_tumu(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, size_t vl) { + return __riscv_vclz_v_u16mf4_tumu(vm, vd, vs2, vl); +} + +vuint16mf2_t test_vclz_v_u16mf2_tumu(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, size_t vl) { + return __riscv_vclz_v_u16mf2_tumu(vm, vd, vs2, vl); +} + +vuint16m1_t test_vclz_v_u16m1_tumu(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, size_t vl) { + return __riscv_vclz_v_u16m1_tumu(vm, vd, vs2, vl); +} + +vuint16m2_t test_vclz_v_u16m2_tumu(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, size_t vl) { + return __riscv_vclz_v_u16m2_tumu(vm, vd, vs2, vl); +} + +vuint16m4_t test_vclz_v_u16m4_tumu(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, size_t vl) { + return __riscv_vclz_v_u16m4_tumu(vm, vd, vs2, vl); +} + +vuint16m8_t test_vclz_v_u16m8_tumu(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, size_t vl) { + return __riscv_vclz_v_u16m8_tumu(vm, vd, vs2, vl); +} + +vuint32mf2_t test_vclz_v_u32mf2_tumu(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) { + return __riscv_vclz_v_u32mf2_tumu(vm, vd, vs2, vl); +} + +vuint32m1_t test_vclz_v_u32m1_tumu(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, size_t 
vl) { + return __riscv_vclz_v_u32m1_tumu(vm, vd, vs2, vl); +} + +vuint32m2_t test_vclz_v_u32m2_tumu(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vclz_v_u32m2_tumu(vm, vd, vs2, vl); +} + +vuint32m4_t test_vclz_v_u32m4_tumu(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vclz_v_u32m4_tumu(vm, vd, vs2, vl); +} + +vuint32m8_t test_vclz_v_u32m8_tumu(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { + return __riscv_vclz_v_u32m8_tumu(vm, vd, vs2, vl); +} + +vuint64m1_t test_vclz_v_u64m1_tumu(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, size_t vl) { + return __riscv_vclz_v_u64m1_tumu(vm, vd, vs2, vl); +} + +vuint64m2_t test_vclz_v_u64m2_tumu(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, size_t vl) { + return __riscv_vclz_v_u64m2_tumu(vm, vd, vs2, vl); +} + +vuint64m4_t test_vclz_v_u64m4_tumu(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, size_t vl) { + return __riscv_vclz_v_u64m4_tumu(vm, vd, vs2, vl); +} + +vuint64m8_t test_vclz_v_u64m8_tumu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, size_t vl) { + return __riscv_vclz_v_u64m8_tumu(vm, vd, vs2, vl); +} + +vuint8mf8_t test_vclz_v_u8mf8_mu(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, size_t vl) { + return __riscv_vclz_v_u8mf8_mu(vm, vd, vs2, vl); +} + +vuint8mf4_t test_vclz_v_u8mf4_mu(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, size_t vl) { + return __riscv_vclz_v_u8mf4_mu(vm, vd, vs2, vl); +} + +vuint8mf2_t test_vclz_v_u8mf2_mu(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, size_t vl) { + return __riscv_vclz_v_u8mf2_mu(vm, vd, vs2, vl); +} + +vuint8m1_t test_vclz_v_u8m1_mu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, size_t vl) { + return __riscv_vclz_v_u8m1_mu(vm, vd, vs2, vl); +} + +vuint8m2_t test_vclz_v_u8m2_mu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, size_t vl) { + return __riscv_vclz_v_u8m2_mu(vm, vd, vs2, vl); +} + +vuint8m4_t test_vclz_v_u8m4_mu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, size_t vl) { + return __riscv_vclz_v_u8m4_mu(vm, vd, vs2, vl); +} + +vuint8m8_t test_vclz_v_u8m8_mu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, size_t vl) { + return __riscv_vclz_v_u8m8_mu(vm, vd, vs2, vl); +} + +vuint16mf4_t test_vclz_v_u16mf4_mu(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, size_t vl) { + return __riscv_vclz_v_u16mf4_mu(vm, vd, vs2, vl); +} + +vuint16mf2_t test_vclz_v_u16mf2_mu(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, size_t vl) { + return __riscv_vclz_v_u16mf2_mu(vm, vd, vs2, vl); +} + +vuint16m1_t test_vclz_v_u16m1_mu(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, size_t vl) { + return __riscv_vclz_v_u16m1_mu(vm, vd, vs2, vl); +} + +vuint16m2_t test_vclz_v_u16m2_mu(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, size_t vl) { + return __riscv_vclz_v_u16m2_mu(vm, vd, vs2, vl); +} + +vuint16m4_t test_vclz_v_u16m4_mu(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, size_t vl) { + return __riscv_vclz_v_u16m4_mu(vm, vd, vs2, vl); +} + +vuint16m8_t test_vclz_v_u16m8_mu(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, size_t vl) { + return __riscv_vclz_v_u16m8_mu(vm, vd, vs2, vl); +} + +vuint32mf2_t test_vclz_v_u32mf2_mu(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) { + return __riscv_vclz_v_u32mf2_mu(vm, vd, vs2, vl); +} + +vuint32m1_t test_vclz_v_u32m1_mu(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vclz_v_u32m1_mu(vm, vd, vs2, vl); +} + +vuint32m2_t test_vclz_v_u32m2_mu(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vclz_v_u32m2_mu(vm, vd, vs2, vl); +} + +vuint32m4_t 
test_vclz_v_u32m4_mu(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vclz_v_u32m4_mu(vm, vd, vs2, vl); +} + +vuint32m8_t test_vclz_v_u32m8_mu(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { + return __riscv_vclz_v_u32m8_mu(vm, vd, vs2, vl); +} + +vuint64m1_t test_vclz_v_u64m1_mu(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, size_t vl) { + return __riscv_vclz_v_u64m1_mu(vm, vd, vs2, vl); +} + +vuint64m2_t test_vclz_v_u64m2_mu(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, size_t vl) { + return __riscv_vclz_v_u64m2_mu(vm, vd, vs2, vl); +} + +vuint64m4_t test_vclz_v_u64m4_mu(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, size_t vl) { + return __riscv_vclz_v_u64m4_mu(vm, vd, vs2, vl); +} + +vuint64m8_t test_vclz_v_u64m8_mu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, size_t vl) { + return __riscv_vclz_v_u64m8_mu(vm, vd, vs2, vl); +} diff --git a/auto-generated/vector-crypto/policy_funcs/llvm-api-tests/vcpop.c b/auto-generated/vector-crypto/policy_funcs/llvm-api-tests/vcpop.c new file mode 100644 index 000000000..2f89711dc --- /dev/null +++ b/auto-generated/vector-crypto/policy_funcs/llvm-api-tests/vcpop.c @@ -0,0 +1,358 @@ +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include + +vuint8mf8_t test_vcpop_v_u8mf8_tu(vuint8mf8_t vd, vuint8mf8_t vs2, size_t vl) { + return __riscv_vcpop_v_u8mf8_tu(vd, vs2, vl); +} + +vuint8mf4_t test_vcpop_v_u8mf4_tu(vuint8mf4_t vd, vuint8mf4_t vs2, size_t vl) { + return __riscv_vcpop_v_u8mf4_tu(vd, vs2, vl); +} + +vuint8mf2_t test_vcpop_v_u8mf2_tu(vuint8mf2_t vd, vuint8mf2_t vs2, size_t vl) { + return __riscv_vcpop_v_u8mf2_tu(vd, vs2, vl); +} + +vuint8m1_t test_vcpop_v_u8m1_tu(vuint8m1_t vd, vuint8m1_t vs2, size_t vl) { + return __riscv_vcpop_v_u8m1_tu(vd, vs2, vl); +} + +vuint8m2_t test_vcpop_v_u8m2_tu(vuint8m2_t vd, vuint8m2_t vs2, size_t vl) { + return __riscv_vcpop_v_u8m2_tu(vd, vs2, vl); +} + +vuint8m4_t test_vcpop_v_u8m4_tu(vuint8m4_t vd, vuint8m4_t vs2, size_t vl) { + return __riscv_vcpop_v_u8m4_tu(vd, vs2, vl); +} + +vuint8m8_t test_vcpop_v_u8m8_tu(vuint8m8_t vd, vuint8m8_t vs2, size_t vl) { + return __riscv_vcpop_v_u8m8_tu(vd, vs2, vl); +} + +vuint16mf4_t test_vcpop_v_u16mf4_tu(vuint16mf4_t vd, vuint16mf4_t vs2, size_t vl) { + return __riscv_vcpop_v_u16mf4_tu(vd, vs2, vl); +} + +vuint16mf2_t test_vcpop_v_u16mf2_tu(vuint16mf2_t vd, vuint16mf2_t vs2, size_t vl) { + return __riscv_vcpop_v_u16mf2_tu(vd, vs2, vl); +} + +vuint16m1_t test_vcpop_v_u16m1_tu(vuint16m1_t vd, vuint16m1_t vs2, size_t vl) { + return __riscv_vcpop_v_u16m1_tu(vd, vs2, vl); +} + +vuint16m2_t test_vcpop_v_u16m2_tu(vuint16m2_t vd, vuint16m2_t vs2, size_t vl) { + return __riscv_vcpop_v_u16m2_tu(vd, vs2, vl); +} + +vuint16m4_t test_vcpop_v_u16m4_tu(vuint16m4_t vd, vuint16m4_t vs2, size_t vl) { + return __riscv_vcpop_v_u16m4_tu(vd, vs2, vl); +} + +vuint16m8_t test_vcpop_v_u16m8_tu(vuint16m8_t vd, vuint16m8_t vs2, size_t vl) { + return __riscv_vcpop_v_u16m8_tu(vd, vs2, vl); +} + +vuint32mf2_t test_vcpop_v_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) { + return __riscv_vcpop_v_u32mf2_tu(vd, vs2, vl); +} + +vuint32m1_t test_vcpop_v_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vcpop_v_u32m1_tu(vd, vs2, vl); +} + +vuint32m2_t test_vcpop_v_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vcpop_v_u32m2_tu(vd, vs2, vl); +} + 
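+// Editor's note, not part of the generated suite: a minimal sketch of the
+// tail-undisturbed (_tu) policy exercised by the tests in this group. The
+// first vl elements receive the per-element population count of vs2, while
+// elements past vl keep their values from vd. The wrapper name below is
+// hypothetical; the intrinsic itself is the one tested above.
+static inline vuint32m2_t popcount_keep_tail(vuint32m2_t vd, vuint32m2_t vs2,
+                                             size_t vl) {
+  return __riscv_vcpop_v_u32m2_tu(vd, vs2, vl);
+}
+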
+vuint32m4_t test_vcpop_v_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vcpop_v_u32m4_tu(vd, vs2, vl); +} + +vuint32m8_t test_vcpop_v_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { + return __riscv_vcpop_v_u32m8_tu(vd, vs2, vl); +} + +vuint64m1_t test_vcpop_v_u64m1_tu(vuint64m1_t vd, vuint64m1_t vs2, size_t vl) { + return __riscv_vcpop_v_u64m1_tu(vd, vs2, vl); +} + +vuint64m2_t test_vcpop_v_u64m2_tu(vuint64m2_t vd, vuint64m2_t vs2, size_t vl) { + return __riscv_vcpop_v_u64m2_tu(vd, vs2, vl); +} + +vuint64m4_t test_vcpop_v_u64m4_tu(vuint64m4_t vd, vuint64m4_t vs2, size_t vl) { + return __riscv_vcpop_v_u64m4_tu(vd, vs2, vl); +} + +vuint64m8_t test_vcpop_v_u64m8_tu(vuint64m8_t vd, vuint64m8_t vs2, size_t vl) { + return __riscv_vcpop_v_u64m8_tu(vd, vs2, vl); +} + +vuint8mf8_t test_vcpop_v_u8mf8_tum(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, size_t vl) { + return __riscv_vcpop_v_u8mf8_tum(vm, vd, vs2, vl); +} + +vuint8mf4_t test_vcpop_v_u8mf4_tum(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, size_t vl) { + return __riscv_vcpop_v_u8mf4_tum(vm, vd, vs2, vl); +} + +vuint8mf2_t test_vcpop_v_u8mf2_tum(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, size_t vl) { + return __riscv_vcpop_v_u8mf2_tum(vm, vd, vs2, vl); +} + +vuint8m1_t test_vcpop_v_u8m1_tum(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, size_t vl) { + return __riscv_vcpop_v_u8m1_tum(vm, vd, vs2, vl); +} + +vuint8m2_t test_vcpop_v_u8m2_tum(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, size_t vl) { + return __riscv_vcpop_v_u8m2_tum(vm, vd, vs2, vl); +} + +vuint8m4_t test_vcpop_v_u8m4_tum(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, size_t vl) { + return __riscv_vcpop_v_u8m4_tum(vm, vd, vs2, vl); +} + +vuint8m8_t test_vcpop_v_u8m8_tum(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, size_t vl) { + return __riscv_vcpop_v_u8m8_tum(vm, vd, vs2, vl); +} + +vuint16mf4_t test_vcpop_v_u16mf4_tum(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, size_t vl) { + return __riscv_vcpop_v_u16mf4_tum(vm, vd, vs2, vl); +} + +vuint16mf2_t test_vcpop_v_u16mf2_tum(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, size_t vl) { + return __riscv_vcpop_v_u16mf2_tum(vm, vd, vs2, vl); +} + +vuint16m1_t test_vcpop_v_u16m1_tum(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, size_t vl) { + return __riscv_vcpop_v_u16m1_tum(vm, vd, vs2, vl); +} + +vuint16m2_t test_vcpop_v_u16m2_tum(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, size_t vl) { + return __riscv_vcpop_v_u16m2_tum(vm, vd, vs2, vl); +} + +vuint16m4_t test_vcpop_v_u16m4_tum(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, size_t vl) { + return __riscv_vcpop_v_u16m4_tum(vm, vd, vs2, vl); +} + +vuint16m8_t test_vcpop_v_u16m8_tum(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, size_t vl) { + return __riscv_vcpop_v_u16m8_tum(vm, vd, vs2, vl); +} + +vuint32mf2_t test_vcpop_v_u32mf2_tum(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) { + return __riscv_vcpop_v_u32mf2_tum(vm, vd, vs2, vl); +} + +vuint32m1_t test_vcpop_v_u32m1_tum(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vcpop_v_u32m1_tum(vm, vd, vs2, vl); +} + +vuint32m2_t test_vcpop_v_u32m2_tum(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vcpop_v_u32m2_tum(vm, vd, vs2, vl); +} + +vuint32m4_t test_vcpop_v_u32m4_tum(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vcpop_v_u32m4_tum(vm, vd, vs2, vl); +} + +vuint32m8_t test_vcpop_v_u32m8_tum(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { + return __riscv_vcpop_v_u32m8_tum(vm, vd, vs2, 
vl); +} + +vuint64m1_t test_vcpop_v_u64m1_tum(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, size_t vl) { + return __riscv_vcpop_v_u64m1_tum(vm, vd, vs2, vl); +} + +vuint64m2_t test_vcpop_v_u64m2_tum(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, size_t vl) { + return __riscv_vcpop_v_u64m2_tum(vm, vd, vs2, vl); +} + +vuint64m4_t test_vcpop_v_u64m4_tum(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, size_t vl) { + return __riscv_vcpop_v_u64m4_tum(vm, vd, vs2, vl); +} + +vuint64m8_t test_vcpop_v_u64m8_tum(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, size_t vl) { + return __riscv_vcpop_v_u64m8_tum(vm, vd, vs2, vl); +} + +vuint8mf8_t test_vcpop_v_u8mf8_tumu(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, size_t vl) { + return __riscv_vcpop_v_u8mf8_tumu(vm, vd, vs2, vl); +} + +vuint8mf4_t test_vcpop_v_u8mf4_tumu(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, size_t vl) { + return __riscv_vcpop_v_u8mf4_tumu(vm, vd, vs2, vl); +} + +vuint8mf2_t test_vcpop_v_u8mf2_tumu(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, size_t vl) { + return __riscv_vcpop_v_u8mf2_tumu(vm, vd, vs2, vl); +} + +vuint8m1_t test_vcpop_v_u8m1_tumu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, size_t vl) { + return __riscv_vcpop_v_u8m1_tumu(vm, vd, vs2, vl); +} + +vuint8m2_t test_vcpop_v_u8m2_tumu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, size_t vl) { + return __riscv_vcpop_v_u8m2_tumu(vm, vd, vs2, vl); +} + +vuint8m4_t test_vcpop_v_u8m4_tumu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, size_t vl) { + return __riscv_vcpop_v_u8m4_tumu(vm, vd, vs2, vl); +} + +vuint8m8_t test_vcpop_v_u8m8_tumu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, size_t vl) { + return __riscv_vcpop_v_u8m8_tumu(vm, vd, vs2, vl); +} + +vuint16mf4_t test_vcpop_v_u16mf4_tumu(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, size_t vl) { + return __riscv_vcpop_v_u16mf4_tumu(vm, vd, vs2, vl); +} + +vuint16mf2_t test_vcpop_v_u16mf2_tumu(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, size_t vl) { + return __riscv_vcpop_v_u16mf2_tumu(vm, vd, vs2, vl); +} + +vuint16m1_t test_vcpop_v_u16m1_tumu(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, size_t vl) { + return __riscv_vcpop_v_u16m1_tumu(vm, vd, vs2, vl); +} + +vuint16m2_t test_vcpop_v_u16m2_tumu(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, size_t vl) { + return __riscv_vcpop_v_u16m2_tumu(vm, vd, vs2, vl); +} + +vuint16m4_t test_vcpop_v_u16m4_tumu(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, size_t vl) { + return __riscv_vcpop_v_u16m4_tumu(vm, vd, vs2, vl); +} + +vuint16m8_t test_vcpop_v_u16m8_tumu(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, size_t vl) { + return __riscv_vcpop_v_u16m8_tumu(vm, vd, vs2, vl); +} + +vuint32mf2_t test_vcpop_v_u32mf2_tumu(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) { + return __riscv_vcpop_v_u32mf2_tumu(vm, vd, vs2, vl); +} + +vuint32m1_t test_vcpop_v_u32m1_tumu(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vcpop_v_u32m1_tumu(vm, vd, vs2, vl); +} + +vuint32m2_t test_vcpop_v_u32m2_tumu(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vcpop_v_u32m2_tumu(vm, vd, vs2, vl); +} + +vuint32m4_t test_vcpop_v_u32m4_tumu(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vcpop_v_u32m4_tumu(vm, vd, vs2, vl); +} + +vuint32m8_t test_vcpop_v_u32m8_tumu(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { + return __riscv_vcpop_v_u32m8_tumu(vm, vd, vs2, vl); +} + +vuint64m1_t test_vcpop_v_u64m1_tumu(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, size_t vl) { + return 
__riscv_vcpop_v_u64m1_tumu(vm, vd, vs2, vl); +} + +vuint64m2_t test_vcpop_v_u64m2_tumu(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, size_t vl) { + return __riscv_vcpop_v_u64m2_tumu(vm, vd, vs2, vl); +} + +vuint64m4_t test_vcpop_v_u64m4_tumu(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, size_t vl) { + return __riscv_vcpop_v_u64m4_tumu(vm, vd, vs2, vl); +} + +vuint64m8_t test_vcpop_v_u64m8_tumu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, size_t vl) { + return __riscv_vcpop_v_u64m8_tumu(vm, vd, vs2, vl); +} + +vuint8mf8_t test_vcpop_v_u8mf8_mu(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, size_t vl) { + return __riscv_vcpop_v_u8mf8_mu(vm, vd, vs2, vl); +} + +vuint8mf4_t test_vcpop_v_u8mf4_mu(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, size_t vl) { + return __riscv_vcpop_v_u8mf4_mu(vm, vd, vs2, vl); +} + +vuint8mf2_t test_vcpop_v_u8mf2_mu(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, size_t vl) { + return __riscv_vcpop_v_u8mf2_mu(vm, vd, vs2, vl); +} + +vuint8m1_t test_vcpop_v_u8m1_mu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, size_t vl) { + return __riscv_vcpop_v_u8m1_mu(vm, vd, vs2, vl); +} + +vuint8m2_t test_vcpop_v_u8m2_mu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, size_t vl) { + return __riscv_vcpop_v_u8m2_mu(vm, vd, vs2, vl); +} + +vuint8m4_t test_vcpop_v_u8m4_mu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, size_t vl) { + return __riscv_vcpop_v_u8m4_mu(vm, vd, vs2, vl); +} + +vuint8m8_t test_vcpop_v_u8m8_mu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, size_t vl) { + return __riscv_vcpop_v_u8m8_mu(vm, vd, vs2, vl); +} + +vuint16mf4_t test_vcpop_v_u16mf4_mu(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, size_t vl) { + return __riscv_vcpop_v_u16mf4_mu(vm, vd, vs2, vl); +} + +vuint16mf2_t test_vcpop_v_u16mf2_mu(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, size_t vl) { + return __riscv_vcpop_v_u16mf2_mu(vm, vd, vs2, vl); +} + +vuint16m1_t test_vcpop_v_u16m1_mu(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, size_t vl) { + return __riscv_vcpop_v_u16m1_mu(vm, vd, vs2, vl); +} + +vuint16m2_t test_vcpop_v_u16m2_mu(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, size_t vl) { + return __riscv_vcpop_v_u16m2_mu(vm, vd, vs2, vl); +} + +vuint16m4_t test_vcpop_v_u16m4_mu(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, size_t vl) { + return __riscv_vcpop_v_u16m4_mu(vm, vd, vs2, vl); +} + +vuint16m8_t test_vcpop_v_u16m8_mu(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, size_t vl) { + return __riscv_vcpop_v_u16m8_mu(vm, vd, vs2, vl); +} + +vuint32mf2_t test_vcpop_v_u32mf2_mu(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) { + return __riscv_vcpop_v_u32mf2_mu(vm, vd, vs2, vl); +} + +vuint32m1_t test_vcpop_v_u32m1_mu(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vcpop_v_u32m1_mu(vm, vd, vs2, vl); +} + +vuint32m2_t test_vcpop_v_u32m2_mu(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vcpop_v_u32m2_mu(vm, vd, vs2, vl); +} + +vuint32m4_t test_vcpop_v_u32m4_mu(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vcpop_v_u32m4_mu(vm, vd, vs2, vl); +} + +vuint32m8_t test_vcpop_v_u32m8_mu(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { + return __riscv_vcpop_v_u32m8_mu(vm, vd, vs2, vl); +} + +vuint64m1_t test_vcpop_v_u64m1_mu(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, size_t vl) { + return __riscv_vcpop_v_u64m1_mu(vm, vd, vs2, vl); +} + +vuint64m2_t test_vcpop_v_u64m2_mu(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, size_t vl) { + return __riscv_vcpop_v_u64m2_mu(vm, vd, vs2, vl); +} + 
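+// Editor's note, not part of the generated suite: a minimal usage sketch of
+// the mask-undisturbed (_mu) policy tested in this group. vs2 holds the source
+// elements, vm selects the active elements, and vd supplies the values that
+// the policy leaves undisturbed (here, the masked-off elements). The wrapper
+// name is hypothetical; only the intrinsic call comes from this file.
+static inline vuint32m1_t popcount_active_lanes(vbool32_t vm, vuint32m1_t vd,
+                                                vuint32m1_t vs2, size_t vl) {
+  return __riscv_vcpop_v_u32m1_mu(vm, vd, vs2, vl);
+}
+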
+vuint64m4_t test_vcpop_v_u64m4_mu(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, size_t vl) { + return __riscv_vcpop_v_u64m4_mu(vm, vd, vs2, vl); +} + +vuint64m8_t test_vcpop_v_u64m8_mu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, size_t vl) { + return __riscv_vcpop_v_u64m8_mu(vm, vd, vs2, vl); +} diff --git a/auto-generated/vector-crypto/policy_funcs/llvm-api-tests/vctz.c b/auto-generated/vector-crypto/policy_funcs/llvm-api-tests/vctz.c new file mode 100644 index 000000000..54d7ee887 --- /dev/null +++ b/auto-generated/vector-crypto/policy_funcs/llvm-api-tests/vctz.c @@ -0,0 +1,365 @@ +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zvl512b \ +// RUN: -target-feature +zvbb \ +// RUN: -target-feature +zvbc \ +// RUN: -target-feature +zvkg \ +// RUN: -target-feature +zvkned \ +// RUN: -target-feature +zvknhb \ +// RUN: -target-feature +zvksed \ +// RUN: -target-feature +zvksh -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include + +vuint8mf8_t test_vctz_v_u8mf8_tu(vuint8mf8_t vd, vuint8mf8_t vs2, size_t vl) { + return __riscv_vctz_v_u8mf8_tu(vd, vs2, vl); +} + +vuint8mf4_t test_vctz_v_u8mf4_tu(vuint8mf4_t vd, vuint8mf4_t vs2, size_t vl) { + return __riscv_vctz_v_u8mf4_tu(vd, vs2, vl); +} + +vuint8mf2_t test_vctz_v_u8mf2_tu(vuint8mf2_t vd, vuint8mf2_t vs2, size_t vl) { + return __riscv_vctz_v_u8mf2_tu(vd, vs2, vl); +} + +vuint8m1_t test_vctz_v_u8m1_tu(vuint8m1_t vd, vuint8m1_t vs2, size_t vl) { + return __riscv_vctz_v_u8m1_tu(vd, vs2, vl); +} + +vuint8m2_t test_vctz_v_u8m2_tu(vuint8m2_t vd, vuint8m2_t vs2, size_t vl) { + return __riscv_vctz_v_u8m2_tu(vd, vs2, vl); +} + +vuint8m4_t test_vctz_v_u8m4_tu(vuint8m4_t vd, vuint8m4_t vs2, size_t vl) { + return __riscv_vctz_v_u8m4_tu(vd, vs2, vl); +} + +vuint8m8_t test_vctz_v_u8m8_tu(vuint8m8_t vd, vuint8m8_t vs2, size_t vl) { + return __riscv_vctz_v_u8m8_tu(vd, vs2, vl); +} + +vuint16mf4_t test_vctz_v_u16mf4_tu(vuint16mf4_t vd, vuint16mf4_t vs2, size_t vl) { + return __riscv_vctz_v_u16mf4_tu(vd, vs2, vl); +} + +vuint16mf2_t test_vctz_v_u16mf2_tu(vuint16mf2_t vd, vuint16mf2_t vs2, size_t vl) { + return __riscv_vctz_v_u16mf2_tu(vd, vs2, vl); +} + +vuint16m1_t test_vctz_v_u16m1_tu(vuint16m1_t vd, vuint16m1_t vs2, size_t vl) { + return __riscv_vctz_v_u16m1_tu(vd, vs2, vl); +} + +vuint16m2_t test_vctz_v_u16m2_tu(vuint16m2_t vd, vuint16m2_t vs2, size_t vl) { + return __riscv_vctz_v_u16m2_tu(vd, vs2, vl); +} + +vuint16m4_t test_vctz_v_u16m4_tu(vuint16m4_t vd, vuint16m4_t vs2, size_t vl) { + return __riscv_vctz_v_u16m4_tu(vd, vs2, vl); +} + +vuint16m8_t test_vctz_v_u16m8_tu(vuint16m8_t vd, vuint16m8_t vs2, size_t vl) { + return __riscv_vctz_v_u16m8_tu(vd, vs2, vl); +} + +vuint32mf2_t test_vctz_v_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) { + return __riscv_vctz_v_u32mf2_tu(vd, vs2, vl); +} + +vuint32m1_t test_vctz_v_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vctz_v_u32m1_tu(vd, vs2, vl); +} + +vuint32m2_t test_vctz_v_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vctz_v_u32m2_tu(vd, vs2, vl); +} + +vuint32m4_t test_vctz_v_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vctz_v_u32m4_tu(vd, vs2, vl); +} + +vuint32m8_t test_vctz_v_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { + return __riscv_vctz_v_u32m8_tu(vd, vs2, vl); +} + +vuint64m1_t test_vctz_v_u64m1_tu(vuint64m1_t vd, vuint64m1_t vs2, size_t vl) { + return 
__riscv_vctz_v_u64m1_tu(vd, vs2, vl); +} + +vuint64m2_t test_vctz_v_u64m2_tu(vuint64m2_t vd, vuint64m2_t vs2, size_t vl) { + return __riscv_vctz_v_u64m2_tu(vd, vs2, vl); +} + +vuint64m4_t test_vctz_v_u64m4_tu(vuint64m4_t vd, vuint64m4_t vs2, size_t vl) { + return __riscv_vctz_v_u64m4_tu(vd, vs2, vl); +} + +vuint64m8_t test_vctz_v_u64m8_tu(vuint64m8_t vd, vuint64m8_t vs2, size_t vl) { + return __riscv_vctz_v_u64m8_tu(vd, vs2, vl); +} + +vuint8mf8_t test_vctz_v_u8mf8_tum(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, size_t vl) { + return __riscv_vctz_v_u8mf8_tum(vm, vd, vs2, vl); +} + +vuint8mf4_t test_vctz_v_u8mf4_tum(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, size_t vl) { + return __riscv_vctz_v_u8mf4_tum(vm, vd, vs2, vl); +} + +vuint8mf2_t test_vctz_v_u8mf2_tum(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, size_t vl) { + return __riscv_vctz_v_u8mf2_tum(vm, vd, vs2, vl); +} + +vuint8m1_t test_vctz_v_u8m1_tum(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, size_t vl) { + return __riscv_vctz_v_u8m1_tum(vm, vd, vs2, vl); +} + +vuint8m2_t test_vctz_v_u8m2_tum(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, size_t vl) { + return __riscv_vctz_v_u8m2_tum(vm, vd, vs2, vl); +} + +vuint8m4_t test_vctz_v_u8m4_tum(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, size_t vl) { + return __riscv_vctz_v_u8m4_tum(vm, vd, vs2, vl); +} + +vuint8m8_t test_vctz_v_u8m8_tum(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, size_t vl) { + return __riscv_vctz_v_u8m8_tum(vm, vd, vs2, vl); +} + +vuint16mf4_t test_vctz_v_u16mf4_tum(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, size_t vl) { + return __riscv_vctz_v_u16mf4_tum(vm, vd, vs2, vl); +} + +vuint16mf2_t test_vctz_v_u16mf2_tum(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, size_t vl) { + return __riscv_vctz_v_u16mf2_tum(vm, vd, vs2, vl); +} + +vuint16m1_t test_vctz_v_u16m1_tum(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, size_t vl) { + return __riscv_vctz_v_u16m1_tum(vm, vd, vs2, vl); +} + +vuint16m2_t test_vctz_v_u16m2_tum(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, size_t vl) { + return __riscv_vctz_v_u16m2_tum(vm, vd, vs2, vl); +} + +vuint16m4_t test_vctz_v_u16m4_tum(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, size_t vl) { + return __riscv_vctz_v_u16m4_tum(vm, vd, vs2, vl); +} + +vuint16m8_t test_vctz_v_u16m8_tum(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, size_t vl) { + return __riscv_vctz_v_u16m8_tum(vm, vd, vs2, vl); +} + +vuint32mf2_t test_vctz_v_u32mf2_tum(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) { + return __riscv_vctz_v_u32mf2_tum(vm, vd, vs2, vl); +} + +vuint32m1_t test_vctz_v_u32m1_tum(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vctz_v_u32m1_tum(vm, vd, vs2, vl); +} + +vuint32m2_t test_vctz_v_u32m2_tum(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vctz_v_u32m2_tum(vm, vd, vs2, vl); +} + +vuint32m4_t test_vctz_v_u32m4_tum(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vctz_v_u32m4_tum(vm, vd, vs2, vl); +} + +vuint32m8_t test_vctz_v_u32m8_tum(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { + return __riscv_vctz_v_u32m8_tum(vm, vd, vs2, vl); +} + +vuint64m1_t test_vctz_v_u64m1_tum(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, size_t vl) { + return __riscv_vctz_v_u64m1_tum(vm, vd, vs2, vl); +} + +vuint64m2_t test_vctz_v_u64m2_tum(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, size_t vl) { + return __riscv_vctz_v_u64m2_tum(vm, vd, vs2, vl); +} + +vuint64m4_t test_vctz_v_u64m4_tum(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, size_t 
vl) { + return __riscv_vctz_v_u64m4_tum(vm, vd, vs2, vl); +} + +vuint64m8_t test_vctz_v_u64m8_tum(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, size_t vl) { + return __riscv_vctz_v_u64m8_tum(vm, vd, vs2, vl); +} + +vuint8mf8_t test_vctz_v_u8mf8_tumu(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, size_t vl) { + return __riscv_vctz_v_u8mf8_tumu(vm, vd, vs2, vl); +} + +vuint8mf4_t test_vctz_v_u8mf4_tumu(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, size_t vl) { + return __riscv_vctz_v_u8mf4_tumu(vm, vd, vs2, vl); +} + +vuint8mf2_t test_vctz_v_u8mf2_tumu(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, size_t vl) { + return __riscv_vctz_v_u8mf2_tumu(vm, vd, vs2, vl); +} + +vuint8m1_t test_vctz_v_u8m1_tumu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, size_t vl) { + return __riscv_vctz_v_u8m1_tumu(vm, vd, vs2, vl); +} + +vuint8m2_t test_vctz_v_u8m2_tumu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, size_t vl) { + return __riscv_vctz_v_u8m2_tumu(vm, vd, vs2, vl); +} + +vuint8m4_t test_vctz_v_u8m4_tumu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, size_t vl) { + return __riscv_vctz_v_u8m4_tumu(vm, vd, vs2, vl); +} + +vuint8m8_t test_vctz_v_u8m8_tumu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, size_t vl) { + return __riscv_vctz_v_u8m8_tumu(vm, vd, vs2, vl); +} + +vuint16mf4_t test_vctz_v_u16mf4_tumu(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, size_t vl) { + return __riscv_vctz_v_u16mf4_tumu(vm, vd, vs2, vl); +} + +vuint16mf2_t test_vctz_v_u16mf2_tumu(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, size_t vl) { + return __riscv_vctz_v_u16mf2_tumu(vm, vd, vs2, vl); +} + +vuint16m1_t test_vctz_v_u16m1_tumu(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, size_t vl) { + return __riscv_vctz_v_u16m1_tumu(vm, vd, vs2, vl); +} + +vuint16m2_t test_vctz_v_u16m2_tumu(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, size_t vl) { + return __riscv_vctz_v_u16m2_tumu(vm, vd, vs2, vl); +} + +vuint16m4_t test_vctz_v_u16m4_tumu(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, size_t vl) { + return __riscv_vctz_v_u16m4_tumu(vm, vd, vs2, vl); +} + +vuint16m8_t test_vctz_v_u16m8_tumu(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, size_t vl) { + return __riscv_vctz_v_u16m8_tumu(vm, vd, vs2, vl); +} + +vuint32mf2_t test_vctz_v_u32mf2_tumu(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) { + return __riscv_vctz_v_u32mf2_tumu(vm, vd, vs2, vl); +} + +vuint32m1_t test_vctz_v_u32m1_tumu(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vctz_v_u32m1_tumu(vm, vd, vs2, vl); +} + +vuint32m2_t test_vctz_v_u32m2_tumu(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vctz_v_u32m2_tumu(vm, vd, vs2, vl); +} + +vuint32m4_t test_vctz_v_u32m4_tumu(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vctz_v_u32m4_tumu(vm, vd, vs2, vl); +} + +vuint32m8_t test_vctz_v_u32m8_tumu(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { + return __riscv_vctz_v_u32m8_tumu(vm, vd, vs2, vl); +} + +vuint64m1_t test_vctz_v_u64m1_tumu(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, size_t vl) { + return __riscv_vctz_v_u64m1_tumu(vm, vd, vs2, vl); +} + +vuint64m2_t test_vctz_v_u64m2_tumu(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, size_t vl) { + return __riscv_vctz_v_u64m2_tumu(vm, vd, vs2, vl); +} + +vuint64m4_t test_vctz_v_u64m4_tumu(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, size_t vl) { + return __riscv_vctz_v_u64m4_tumu(vm, vd, vs2, vl); +} + +vuint64m8_t test_vctz_v_u64m8_tumu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, size_t vl) { + return 
__riscv_vctz_v_u64m8_tumu(vm, vd, vs2, vl); +} + +vuint8mf8_t test_vctz_v_u8mf8_mu(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, size_t vl) { + return __riscv_vctz_v_u8mf8_mu(vm, vd, vs2, vl); +} + +vuint8mf4_t test_vctz_v_u8mf4_mu(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, size_t vl) { + return __riscv_vctz_v_u8mf4_mu(vm, vd, vs2, vl); +} + +vuint8mf2_t test_vctz_v_u8mf2_mu(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, size_t vl) { + return __riscv_vctz_v_u8mf2_mu(vm, vd, vs2, vl); +} + +vuint8m1_t test_vctz_v_u8m1_mu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, size_t vl) { + return __riscv_vctz_v_u8m1_mu(vm, vd, vs2, vl); +} + +vuint8m2_t test_vctz_v_u8m2_mu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, size_t vl) { + return __riscv_vctz_v_u8m2_mu(vm, vd, vs2, vl); +} + +vuint8m4_t test_vctz_v_u8m4_mu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, size_t vl) { + return __riscv_vctz_v_u8m4_mu(vm, vd, vs2, vl); +} + +vuint8m8_t test_vctz_v_u8m8_mu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, size_t vl) { + return __riscv_vctz_v_u8m8_mu(vm, vd, vs2, vl); +} + +vuint16mf4_t test_vctz_v_u16mf4_mu(vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, size_t vl) { + return __riscv_vctz_v_u16mf4_mu(vm, vd, vs2, vl); +} + +vuint16mf2_t test_vctz_v_u16mf2_mu(vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, size_t vl) { + return __riscv_vctz_v_u16mf2_mu(vm, vd, vs2, vl); +} + +vuint16m1_t test_vctz_v_u16m1_mu(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, size_t vl) { + return __riscv_vctz_v_u16m1_mu(vm, vd, vs2, vl); +} + +vuint16m2_t test_vctz_v_u16m2_mu(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, size_t vl) { + return __riscv_vctz_v_u16m2_mu(vm, vd, vs2, vl); +} + +vuint16m4_t test_vctz_v_u16m4_mu(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, size_t vl) { + return __riscv_vctz_v_u16m4_mu(vm, vd, vs2, vl); +} + +vuint16m8_t test_vctz_v_u16m8_mu(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, size_t vl) { + return __riscv_vctz_v_u16m8_mu(vm, vd, vs2, vl); +} + +vuint32mf2_t test_vctz_v_u32mf2_mu(vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) { + return __riscv_vctz_v_u32mf2_mu(vm, vd, vs2, vl); +} + +vuint32m1_t test_vctz_v_u32m1_mu(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vctz_v_u32m1_mu(vm, vd, vs2, vl); +} + +vuint32m2_t test_vctz_v_u32m2_mu(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vctz_v_u32m2_mu(vm, vd, vs2, vl); +} + +vuint32m4_t test_vctz_v_u32m4_mu(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vctz_v_u32m4_mu(vm, vd, vs2, vl); +} + +vuint32m8_t test_vctz_v_u32m8_mu(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { + return __riscv_vctz_v_u32m8_mu(vm, vd, vs2, vl); +} + +vuint64m1_t test_vctz_v_u64m1_mu(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, size_t vl) { + return __riscv_vctz_v_u64m1_mu(vm, vd, vs2, vl); +} + +vuint64m2_t test_vctz_v_u64m2_mu(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, size_t vl) { + return __riscv_vctz_v_u64m2_mu(vm, vd, vs2, vl); +} + +vuint64m4_t test_vctz_v_u64m4_mu(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, size_t vl) { + return __riscv_vctz_v_u64m4_mu(vm, vd, vs2, vl); +} + +vuint64m8_t test_vctz_v_u64m8_mu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, size_t vl) { + return __riscv_vctz_v_u64m8_mu(vm, vd, vs2, vl); +} diff --git a/auto-generated/vector-crypto/policy_funcs/llvm-overloaded-tests/vclz.c b/auto-generated/vector-crypto/policy_funcs/llvm-overloaded-tests/vclz.c new file mode 100644 index 000000000..e93b008a3 --- /dev/null +++ 
b/auto-generated/vector-crypto/policy_funcs/llvm-overloaded-tests/vclz.c @@ -0,0 +1,434 @@ +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zvl512b \ +// RUN: -target-feature +zvbb \ +// RUN: -target-feature +zvbc \ +// RUN: -target-feature +zvkg \ +// RUN: -target-feature +zvkned \ +// RUN: -target-feature +zvknhb \ +// RUN: -target-feature +zvksed \ +// RUN: -target-feature +zvksh -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include + +vuint8mf8_t test_vclz_v_u8mf8_tu(vuint8mf8_t vd, vuint8mf8_t vs2, size_t vl) { + return __riscv_vclz_tu(vd, vs2, vl); +} + +vuint8mf4_t test_vclz_v_u8mf4_tu(vuint8mf4_t vd, vuint8mf4_t vs2, size_t vl) { + return __riscv_vclz_tu(vd, vs2, vl); +} + +vuint8mf2_t test_vclz_v_u8mf2_tu(vuint8mf2_t vd, vuint8mf2_t vs2, size_t vl) { + return __riscv_vclz_tu(vd, vs2, vl); +} + +vuint8m1_t test_vclz_v_u8m1_tu(vuint8m1_t vd, vuint8m1_t vs2, size_t vl) { + return __riscv_vclz_tu(vd, vs2, vl); +} + +vuint8m2_t test_vclz_v_u8m2_tu(vuint8m2_t vd, vuint8m2_t vs2, size_t vl) { + return __riscv_vclz_tu(vd, vs2, vl); +} + +vuint8m4_t test_vclz_v_u8m4_tu(vuint8m4_t vd, vuint8m4_t vs2, size_t vl) { + return __riscv_vclz_tu(vd, vs2, vl); +} + +vuint8m8_t test_vclz_v_u8m8_tu(vuint8m8_t vd, vuint8m8_t vs2, size_t vl) { + return __riscv_vclz_tu(vd, vs2, vl); +} + +vuint16mf4_t test_vclz_v_u16mf4_tu(vuint16mf4_t vd, vuint16mf4_t vs2, + size_t vl) { + return __riscv_vclz_tu(vd, vs2, vl); +} + +vuint16mf2_t test_vclz_v_u16mf2_tu(vuint16mf2_t vd, vuint16mf2_t vs2, + size_t vl) { + return __riscv_vclz_tu(vd, vs2, vl); +} + +vuint16m1_t test_vclz_v_u16m1_tu(vuint16m1_t vd, vuint16m1_t vs2, size_t vl) { + return __riscv_vclz_tu(vd, vs2, vl); +} + +vuint16m2_t test_vclz_v_u16m2_tu(vuint16m2_t vd, vuint16m2_t vs2, size_t vl) { + return __riscv_vclz_tu(vd, vs2, vl); +} + +vuint16m4_t test_vclz_v_u16m4_tu(vuint16m4_t vd, vuint16m4_t vs2, size_t vl) { + return __riscv_vclz_tu(vd, vs2, vl); +} + +vuint16m8_t test_vclz_v_u16m8_tu(vuint16m8_t vd, vuint16m8_t vs2, size_t vl) { + return __riscv_vclz_tu(vd, vs2, vl); +} + +vuint32mf2_t test_vclz_v_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, + size_t vl) { + return __riscv_vclz_tu(vd, vs2, vl); +} + +vuint32m1_t test_vclz_v_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vclz_tu(vd, vs2, vl); +} + +vuint32m2_t test_vclz_v_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vclz_tu(vd, vs2, vl); +} + +vuint32m4_t test_vclz_v_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vclz_tu(vd, vs2, vl); +} + +vuint32m8_t test_vclz_v_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { + return __riscv_vclz_tu(vd, vs2, vl); +} + +vuint64m1_t test_vclz_v_u64m1_tu(vuint64m1_t vd, vuint64m1_t vs2, size_t vl) { + return __riscv_vclz_tu(vd, vs2, vl); +} + +vuint64m2_t test_vclz_v_u64m2_tu(vuint64m2_t vd, vuint64m2_t vs2, size_t vl) { + return __riscv_vclz_tu(vd, vs2, vl); +} + +vuint64m4_t test_vclz_v_u64m4_tu(vuint64m4_t vd, vuint64m4_t vs2, size_t vl) { + return __riscv_vclz_tu(vd, vs2, vl); +} + +vuint64m8_t test_vclz_v_u64m8_tu(vuint64m8_t vd, vuint64m8_t vs2, size_t vl) { + return __riscv_vclz_tu(vd, vs2, vl); +} + +vuint8mf8_t test_vclz_v_u8mf8_tum(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, + size_t vl) { + return __riscv_vclz_tum(vm, vd, vs2, vl); +} + +vuint8mf4_t test_vclz_v_u8mf4_tum(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, + size_t 
vl) { + return __riscv_vclz_tum(vm, vd, vs2, vl); +} + +vuint8mf2_t test_vclz_v_u8mf2_tum(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, + size_t vl) { + return __riscv_vclz_tum(vm, vd, vs2, vl); +} + +vuint8m1_t test_vclz_v_u8m1_tum(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, + size_t vl) { + return __riscv_vclz_tum(vm, vd, vs2, vl); +} + +vuint8m2_t test_vclz_v_u8m2_tum(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, + size_t vl) { + return __riscv_vclz_tum(vm, vd, vs2, vl); +} + +vuint8m4_t test_vclz_v_u8m4_tum(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, + size_t vl) { + return __riscv_vclz_tum(vm, vd, vs2, vl); +} + +vuint8m8_t test_vclz_v_u8m8_tum(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, + size_t vl) { + return __riscv_vclz_tum(vm, vd, vs2, vl); +} + +vuint16mf4_t test_vclz_v_u16mf4_tum(vbool64_t vm, vuint16mf4_t vd, + vuint16mf4_t vs2, size_t vl) { + return __riscv_vclz_tum(vm, vd, vs2, vl); +} + +vuint16mf2_t test_vclz_v_u16mf2_tum(vbool32_t vm, vuint16mf2_t vd, + vuint16mf2_t vs2, size_t vl) { + return __riscv_vclz_tum(vm, vd, vs2, vl); +} + +vuint16m1_t test_vclz_v_u16m1_tum(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, + size_t vl) { + return __riscv_vclz_tum(vm, vd, vs2, vl); +} + +vuint16m2_t test_vclz_v_u16m2_tum(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, + size_t vl) { + return __riscv_vclz_tum(vm, vd, vs2, vl); +} + +vuint16m4_t test_vclz_v_u16m4_tum(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, + size_t vl) { + return __riscv_vclz_tum(vm, vd, vs2, vl); +} + +vuint16m8_t test_vclz_v_u16m8_tum(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, + size_t vl) { + return __riscv_vclz_tum(vm, vd, vs2, vl); +} + +vuint32mf2_t test_vclz_v_u32mf2_tum(vbool64_t vm, vuint32mf2_t vd, + vuint32mf2_t vs2, size_t vl) { + return __riscv_vclz_tum(vm, vd, vs2, vl); +} + +vuint32m1_t test_vclz_v_u32m1_tum(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, + size_t vl) { + return __riscv_vclz_tum(vm, vd, vs2, vl); +} + +vuint32m2_t test_vclz_v_u32m2_tum(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, + size_t vl) { + return __riscv_vclz_tum(vm, vd, vs2, vl); +} + +vuint32m4_t test_vclz_v_u32m4_tum(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, + size_t vl) { + return __riscv_vclz_tum(vm, vd, vs2, vl); +} + +vuint32m8_t test_vclz_v_u32m8_tum(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, + size_t vl) { + return __riscv_vclz_tum(vm, vd, vs2, vl); +} + +vuint64m1_t test_vclz_v_u64m1_tum(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, + size_t vl) { + return __riscv_vclz_tum(vm, vd, vs2, vl); +} + +vuint64m2_t test_vclz_v_u64m2_tum(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, + size_t vl) { + return __riscv_vclz_tum(vm, vd, vs2, vl); +} + +vuint64m4_t test_vclz_v_u64m4_tum(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, + size_t vl) { + return __riscv_vclz_tum(vm, vd, vs2, vl); +} + +vuint64m8_t test_vclz_v_u64m8_tum(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, + size_t vl) { + return __riscv_vclz_tum(vm, vd, vs2, vl); +} + +vuint8mf8_t test_vclz_v_u8mf8_tumu(vbool64_t vm, vuint8mf8_t vd, + vuint8mf8_t vs2, size_t vl) { + return __riscv_vclz_tumu(vm, vd, vs2, vl); +} + +vuint8mf4_t test_vclz_v_u8mf4_tumu(vbool32_t vm, vuint8mf4_t vd, + vuint8mf4_t vs2, size_t vl) { + return __riscv_vclz_tumu(vm, vd, vs2, vl); +} + +vuint8mf2_t test_vclz_v_u8mf2_tumu(vbool16_t vm, vuint8mf2_t vd, + vuint8mf2_t vs2, size_t vl) { + return __riscv_vclz_tumu(vm, vd, vs2, vl); +} + +vuint8m1_t test_vclz_v_u8m1_tumu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, + size_t vl) { + return __riscv_vclz_tumu(vm, vd, vs2, vl); +} + 
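+// Editor's note, not part of the generated suite: the overloaded spellings in
+// this file drop the `_v_<type>` part of the name and are resolved from the
+// operand types, so the call below is equivalent to the explicit
+// __riscv_vclz_v_u16m1_tumu listed in the documentation above. The wrapper
+// name is hypothetical and only illustrates the naming difference.
+static inline vuint16m1_t clz_overloaded_example(vbool16_t vm, vuint16m1_t vd,
+                                                 vuint16m1_t vs2, size_t vl) {
+  // Under the tail-undisturbed, mask-undisturbed (_tumu) policy, tail and
+  // masked-off elements take their values from vd.
+  return __riscv_vclz_tumu(vm, vd, vs2, vl);
+}
+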
+vuint8m2_t test_vclz_v_u8m2_tumu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, + size_t vl) { + return __riscv_vclz_tumu(vm, vd, vs2, vl); +} + +vuint8m4_t test_vclz_v_u8m4_tumu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, + size_t vl) { + return __riscv_vclz_tumu(vm, vd, vs2, vl); +} + +vuint8m8_t test_vclz_v_u8m8_tumu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, + size_t vl) { + return __riscv_vclz_tumu(vm, vd, vs2, vl); +} + +vuint16mf4_t test_vclz_v_u16mf4_tumu(vbool64_t vm, vuint16mf4_t vd, + vuint16mf4_t vs2, size_t vl) { + return __riscv_vclz_tumu(vm, vd, vs2, vl); +} + +vuint16mf2_t test_vclz_v_u16mf2_tumu(vbool32_t vm, vuint16mf2_t vd, + vuint16mf2_t vs2, size_t vl) { + return __riscv_vclz_tumu(vm, vd, vs2, vl); +} + +vuint16m1_t test_vclz_v_u16m1_tumu(vbool16_t vm, vuint16m1_t vd, + vuint16m1_t vs2, size_t vl) { + return __riscv_vclz_tumu(vm, vd, vs2, vl); +} + +vuint16m2_t test_vclz_v_u16m2_tumu(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, + size_t vl) { + return __riscv_vclz_tumu(vm, vd, vs2, vl); +} + +vuint16m4_t test_vclz_v_u16m4_tumu(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, + size_t vl) { + return __riscv_vclz_tumu(vm, vd, vs2, vl); +} + +vuint16m8_t test_vclz_v_u16m8_tumu(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, + size_t vl) { + return __riscv_vclz_tumu(vm, vd, vs2, vl); +} + +vuint32mf2_t test_vclz_v_u32mf2_tumu(vbool64_t vm, vuint32mf2_t vd, + vuint32mf2_t vs2, size_t vl) { + return __riscv_vclz_tumu(vm, vd, vs2, vl); +} + +vuint32m1_t test_vclz_v_u32m1_tumu(vbool32_t vm, vuint32m1_t vd, + vuint32m1_t vs2, size_t vl) { + return __riscv_vclz_tumu(vm, vd, vs2, vl); +} + +vuint32m2_t test_vclz_v_u32m2_tumu(vbool16_t vm, vuint32m2_t vd, + vuint32m2_t vs2, size_t vl) { + return __riscv_vclz_tumu(vm, vd, vs2, vl); +} + +vuint32m4_t test_vclz_v_u32m4_tumu(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, + size_t vl) { + return __riscv_vclz_tumu(vm, vd, vs2, vl); +} + +vuint32m8_t test_vclz_v_u32m8_tumu(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, + size_t vl) { + return __riscv_vclz_tumu(vm, vd, vs2, vl); +} + +vuint64m1_t test_vclz_v_u64m1_tumu(vbool64_t vm, vuint64m1_t vd, + vuint64m1_t vs2, size_t vl) { + return __riscv_vclz_tumu(vm, vd, vs2, vl); +} + +vuint64m2_t test_vclz_v_u64m2_tumu(vbool32_t vm, vuint64m2_t vd, + vuint64m2_t vs2, size_t vl) { + return __riscv_vclz_tumu(vm, vd, vs2, vl); +} + +vuint64m4_t test_vclz_v_u64m4_tumu(vbool16_t vm, vuint64m4_t vd, + vuint64m4_t vs2, size_t vl) { + return __riscv_vclz_tumu(vm, vd, vs2, vl); +} + +vuint64m8_t test_vclz_v_u64m8_tumu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, + size_t vl) { + return __riscv_vclz_tumu(vm, vd, vs2, vl); +} + +vuint8mf8_t test_vclz_v_u8mf8_mu(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, + size_t vl) { + return __riscv_vclz_mu(vm, vd, vs2, vl); +} + +vuint8mf4_t test_vclz_v_u8mf4_mu(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, + size_t vl) { + return __riscv_vclz_mu(vm, vd, vs2, vl); +} + +vuint8mf2_t test_vclz_v_u8mf2_mu(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, + size_t vl) { + return __riscv_vclz_mu(vm, vd, vs2, vl); +} + +vuint8m1_t test_vclz_v_u8m1_mu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, + size_t vl) { + return __riscv_vclz_mu(vm, vd, vs2, vl); +} + +vuint8m2_t test_vclz_v_u8m2_mu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, + size_t vl) { + return __riscv_vclz_mu(vm, vd, vs2, vl); +} + +vuint8m4_t test_vclz_v_u8m4_mu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, + size_t vl) { + return __riscv_vclz_mu(vm, vd, vs2, vl); +} + +vuint8m8_t test_vclz_v_u8m8_mu(vbool1_t vm, 
vuint8m8_t vd, vuint8m8_t vs2, + size_t vl) { + return __riscv_vclz_mu(vm, vd, vs2, vl); +} + +vuint16mf4_t test_vclz_v_u16mf4_mu(vbool64_t vm, vuint16mf4_t vd, + vuint16mf4_t vs2, size_t vl) { + return __riscv_vclz_mu(vm, vd, vs2, vl); +} + +vuint16mf2_t test_vclz_v_u16mf2_mu(vbool32_t vm, vuint16mf2_t vd, + vuint16mf2_t vs2, size_t vl) { + return __riscv_vclz_mu(vm, vd, vs2, vl); +} + +vuint16m1_t test_vclz_v_u16m1_mu(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, + size_t vl) { + return __riscv_vclz_mu(vm, vd, vs2, vl); +} + +vuint16m2_t test_vclz_v_u16m2_mu(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, + size_t vl) { + return __riscv_vclz_mu(vm, vd, vs2, vl); +} + +vuint16m4_t test_vclz_v_u16m4_mu(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, + size_t vl) { + return __riscv_vclz_mu(vm, vd, vs2, vl); +} + +vuint16m8_t test_vclz_v_u16m8_mu(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, + size_t vl) { + return __riscv_vclz_mu(vm, vd, vs2, vl); +} + +vuint32mf2_t test_vclz_v_u32mf2_mu(vbool64_t vm, vuint32mf2_t vd, + vuint32mf2_t vs2, size_t vl) { + return __riscv_vclz_mu(vm, vd, vs2, vl); +} + +vuint32m1_t test_vclz_v_u32m1_mu(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, + size_t vl) { + return __riscv_vclz_mu(vm, vd, vs2, vl); +} + +vuint32m2_t test_vclz_v_u32m2_mu(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, + size_t vl) { + return __riscv_vclz_mu(vm, vd, vs2, vl); +} + +vuint32m4_t test_vclz_v_u32m4_mu(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, + size_t vl) { + return __riscv_vclz_mu(vm, vd, vs2, vl); +} + +vuint32m8_t test_vclz_v_u32m8_mu(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, + size_t vl) { + return __riscv_vclz_mu(vm, vd, vs2, vl); +} + +vuint64m1_t test_vclz_v_u64m1_mu(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, + size_t vl) { + return __riscv_vclz_mu(vm, vd, vs2, vl); +} + +vuint64m2_t test_vclz_v_u64m2_mu(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, + size_t vl) { + return __riscv_vclz_mu(vm, vd, vs2, vl); +} + +vuint64m4_t test_vclz_v_u64m4_mu(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, + size_t vl) { + return __riscv_vclz_mu(vm, vd, vs2, vl); +} + +vuint64m8_t test_vclz_v_u64m8_mu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, + size_t vl) { + return __riscv_vclz_mu(vm, vd, vs2, vl); +} diff --git a/auto-generated/vector-crypto/policy_funcs/llvm-overloaded-tests/vcpop.c b/auto-generated/vector-crypto/policy_funcs/llvm-overloaded-tests/vcpop.c new file mode 100644 index 000000000..4eb8efa2b --- /dev/null +++ b/auto-generated/vector-crypto/policy_funcs/llvm-overloaded-tests/vcpop.c @@ -0,0 +1,427 @@ +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include + +vuint8mf8_t test_vcpop_v_u8mf8_tu(vuint8mf8_t vd, vuint8mf8_t vs2, size_t vl) { + return __riscv_vcpop_tu(vd, vs2, vl); +} + +vuint8mf4_t test_vcpop_v_u8mf4_tu(vuint8mf4_t vd, vuint8mf4_t vs2, size_t vl) { + return __riscv_vcpop_tu(vd, vs2, vl); +} + +vuint8mf2_t test_vcpop_v_u8mf2_tu(vuint8mf2_t vd, vuint8mf2_t vs2, size_t vl) { + return __riscv_vcpop_tu(vd, vs2, vl); +} + +vuint8m1_t test_vcpop_v_u8m1_tu(vuint8m1_t vd, vuint8m1_t vs2, size_t vl) { + return __riscv_vcpop_tu(vd, vs2, vl); +} + +vuint8m2_t test_vcpop_v_u8m2_tu(vuint8m2_t vd, vuint8m2_t vs2, size_t vl) { + return __riscv_vcpop_tu(vd, vs2, vl); +} + +vuint8m4_t test_vcpop_v_u8m4_tu(vuint8m4_t vd, vuint8m4_t vs2, size_t vl) { + return __riscv_vcpop_tu(vd, vs2, vl); +} + 
+vuint8m8_t test_vcpop_v_u8m8_tu(vuint8m8_t vd, vuint8m8_t vs2, size_t vl) { + return __riscv_vcpop_tu(vd, vs2, vl); +} + +vuint16mf4_t test_vcpop_v_u16mf4_tu(vuint16mf4_t vd, vuint16mf4_t vs2, + size_t vl) { + return __riscv_vcpop_tu(vd, vs2, vl); +} + +vuint16mf2_t test_vcpop_v_u16mf2_tu(vuint16mf2_t vd, vuint16mf2_t vs2, + size_t vl) { + return __riscv_vcpop_tu(vd, vs2, vl); +} + +vuint16m1_t test_vcpop_v_u16m1_tu(vuint16m1_t vd, vuint16m1_t vs2, size_t vl) { + return __riscv_vcpop_tu(vd, vs2, vl); +} + +vuint16m2_t test_vcpop_v_u16m2_tu(vuint16m2_t vd, vuint16m2_t vs2, size_t vl) { + return __riscv_vcpop_tu(vd, vs2, vl); +} + +vuint16m4_t test_vcpop_v_u16m4_tu(vuint16m4_t vd, vuint16m4_t vs2, size_t vl) { + return __riscv_vcpop_tu(vd, vs2, vl); +} + +vuint16m8_t test_vcpop_v_u16m8_tu(vuint16m8_t vd, vuint16m8_t vs2, size_t vl) { + return __riscv_vcpop_tu(vd, vs2, vl); +} + +vuint32mf2_t test_vcpop_v_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, + size_t vl) { + return __riscv_vcpop_tu(vd, vs2, vl); +} + +vuint32m1_t test_vcpop_v_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vcpop_tu(vd, vs2, vl); +} + +vuint32m2_t test_vcpop_v_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vcpop_tu(vd, vs2, vl); +} + +vuint32m4_t test_vcpop_v_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vcpop_tu(vd, vs2, vl); +} + +vuint32m8_t test_vcpop_v_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { + return __riscv_vcpop_tu(vd, vs2, vl); +} + +vuint64m1_t test_vcpop_v_u64m1_tu(vuint64m1_t vd, vuint64m1_t vs2, size_t vl) { + return __riscv_vcpop_tu(vd, vs2, vl); +} + +vuint64m2_t test_vcpop_v_u64m2_tu(vuint64m2_t vd, vuint64m2_t vs2, size_t vl) { + return __riscv_vcpop_tu(vd, vs2, vl); +} + +vuint64m4_t test_vcpop_v_u64m4_tu(vuint64m4_t vd, vuint64m4_t vs2, size_t vl) { + return __riscv_vcpop_tu(vd, vs2, vl); +} + +vuint64m8_t test_vcpop_v_u64m8_tu(vuint64m8_t vd, vuint64m8_t vs2, size_t vl) { + return __riscv_vcpop_tu(vd, vs2, vl); +} + +vuint8mf8_t test_vcpop_v_u8mf8_tum(vbool64_t vm, vuint8mf8_t vd, + vuint8mf8_t vs2, size_t vl) { + return __riscv_vcpop_tum(vm, vd, vs2, vl); +} + +vuint8mf4_t test_vcpop_v_u8mf4_tum(vbool32_t vm, vuint8mf4_t vd, + vuint8mf4_t vs2, size_t vl) { + return __riscv_vcpop_tum(vm, vd, vs2, vl); +} + +vuint8mf2_t test_vcpop_v_u8mf2_tum(vbool16_t vm, vuint8mf2_t vd, + vuint8mf2_t vs2, size_t vl) { + return __riscv_vcpop_tum(vm, vd, vs2, vl); +} + +vuint8m1_t test_vcpop_v_u8m1_tum(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, + size_t vl) { + return __riscv_vcpop_tum(vm, vd, vs2, vl); +} + +vuint8m2_t test_vcpop_v_u8m2_tum(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, + size_t vl) { + return __riscv_vcpop_tum(vm, vd, vs2, vl); +} + +vuint8m4_t test_vcpop_v_u8m4_tum(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, + size_t vl) { + return __riscv_vcpop_tum(vm, vd, vs2, vl); +} + +vuint8m8_t test_vcpop_v_u8m8_tum(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, + size_t vl) { + return __riscv_vcpop_tum(vm, vd, vs2, vl); +} + +vuint16mf4_t test_vcpop_v_u16mf4_tum(vbool64_t vm, vuint16mf4_t vd, + vuint16mf4_t vs2, size_t vl) { + return __riscv_vcpop_tum(vm, vd, vs2, vl); +} + +vuint16mf2_t test_vcpop_v_u16mf2_tum(vbool32_t vm, vuint16mf2_t vd, + vuint16mf2_t vs2, size_t vl) { + return __riscv_vcpop_tum(vm, vd, vs2, vl); +} + +vuint16m1_t test_vcpop_v_u16m1_tum(vbool16_t vm, vuint16m1_t vd, + vuint16m1_t vs2, size_t vl) { + return __riscv_vcpop_tum(vm, vd, vs2, vl); +} + +vuint16m2_t test_vcpop_v_u16m2_tum(vbool8_t vm, 
vuint16m2_t vd, vuint16m2_t vs2, + size_t vl) { + return __riscv_vcpop_tum(vm, vd, vs2, vl); +} + +vuint16m4_t test_vcpop_v_u16m4_tum(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, + size_t vl) { + return __riscv_vcpop_tum(vm, vd, vs2, vl); +} + +vuint16m8_t test_vcpop_v_u16m8_tum(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, + size_t vl) { + return __riscv_vcpop_tum(vm, vd, vs2, vl); +} + +vuint32mf2_t test_vcpop_v_u32mf2_tum(vbool64_t vm, vuint32mf2_t vd, + vuint32mf2_t vs2, size_t vl) { + return __riscv_vcpop_tum(vm, vd, vs2, vl); +} + +vuint32m1_t test_vcpop_v_u32m1_tum(vbool32_t vm, vuint32m1_t vd, + vuint32m1_t vs2, size_t vl) { + return __riscv_vcpop_tum(vm, vd, vs2, vl); +} + +vuint32m2_t test_vcpop_v_u32m2_tum(vbool16_t vm, vuint32m2_t vd, + vuint32m2_t vs2, size_t vl) { + return __riscv_vcpop_tum(vm, vd, vs2, vl); +} + +vuint32m4_t test_vcpop_v_u32m4_tum(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, + size_t vl) { + return __riscv_vcpop_tum(vm, vd, vs2, vl); +} + +vuint32m8_t test_vcpop_v_u32m8_tum(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, + size_t vl) { + return __riscv_vcpop_tum(vm, vd, vs2, vl); +} + +vuint64m1_t test_vcpop_v_u64m1_tum(vbool64_t vm, vuint64m1_t vd, + vuint64m1_t vs2, size_t vl) { + return __riscv_vcpop_tum(vm, vd, vs2, vl); +} + +vuint64m2_t test_vcpop_v_u64m2_tum(vbool32_t vm, vuint64m2_t vd, + vuint64m2_t vs2, size_t vl) { + return __riscv_vcpop_tum(vm, vd, vs2, vl); +} + +vuint64m4_t test_vcpop_v_u64m4_tum(vbool16_t vm, vuint64m4_t vd, + vuint64m4_t vs2, size_t vl) { + return __riscv_vcpop_tum(vm, vd, vs2, vl); +} + +vuint64m8_t test_vcpop_v_u64m8_tum(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, + size_t vl) { + return __riscv_vcpop_tum(vm, vd, vs2, vl); +} + +vuint8mf8_t test_vcpop_v_u8mf8_tumu(vbool64_t vm, vuint8mf8_t vd, + vuint8mf8_t vs2, size_t vl) { + return __riscv_vcpop_tumu(vm, vd, vs2, vl); +} + +vuint8mf4_t test_vcpop_v_u8mf4_tumu(vbool32_t vm, vuint8mf4_t vd, + vuint8mf4_t vs2, size_t vl) { + return __riscv_vcpop_tumu(vm, vd, vs2, vl); +} + +vuint8mf2_t test_vcpop_v_u8mf2_tumu(vbool16_t vm, vuint8mf2_t vd, + vuint8mf2_t vs2, size_t vl) { + return __riscv_vcpop_tumu(vm, vd, vs2, vl); +} + +vuint8m1_t test_vcpop_v_u8m1_tumu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, + size_t vl) { + return __riscv_vcpop_tumu(vm, vd, vs2, vl); +} + +vuint8m2_t test_vcpop_v_u8m2_tumu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, + size_t vl) { + return __riscv_vcpop_tumu(vm, vd, vs2, vl); +} + +vuint8m4_t test_vcpop_v_u8m4_tumu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, + size_t vl) { + return __riscv_vcpop_tumu(vm, vd, vs2, vl); +} + +vuint8m8_t test_vcpop_v_u8m8_tumu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, + size_t vl) { + return __riscv_vcpop_tumu(vm, vd, vs2, vl); +} + +vuint16mf4_t test_vcpop_v_u16mf4_tumu(vbool64_t vm, vuint16mf4_t vd, + vuint16mf4_t vs2, size_t vl) { + return __riscv_vcpop_tumu(vm, vd, vs2, vl); +} + +vuint16mf2_t test_vcpop_v_u16mf2_tumu(vbool32_t vm, vuint16mf2_t vd, + vuint16mf2_t vs2, size_t vl) { + return __riscv_vcpop_tumu(vm, vd, vs2, vl); +} + +vuint16m1_t test_vcpop_v_u16m1_tumu(vbool16_t vm, vuint16m1_t vd, + vuint16m1_t vs2, size_t vl) { + return __riscv_vcpop_tumu(vm, vd, vs2, vl); +} + +vuint16m2_t test_vcpop_v_u16m2_tumu(vbool8_t vm, vuint16m2_t vd, + vuint16m2_t vs2, size_t vl) { + return __riscv_vcpop_tumu(vm, vd, vs2, vl); +} + +vuint16m4_t test_vcpop_v_u16m4_tumu(vbool4_t vm, vuint16m4_t vd, + vuint16m4_t vs2, size_t vl) { + return __riscv_vcpop_tumu(vm, vd, vs2, vl); +} + +vuint16m8_t 
test_vcpop_v_u16m8_tumu(vbool2_t vm, vuint16m8_t vd, + vuint16m8_t vs2, size_t vl) { + return __riscv_vcpop_tumu(vm, vd, vs2, vl); +} + +vuint32mf2_t test_vcpop_v_u32mf2_tumu(vbool64_t vm, vuint32mf2_t vd, + vuint32mf2_t vs2, size_t vl) { + return __riscv_vcpop_tumu(vm, vd, vs2, vl); +} + +vuint32m1_t test_vcpop_v_u32m1_tumu(vbool32_t vm, vuint32m1_t vd, + vuint32m1_t vs2, size_t vl) { + return __riscv_vcpop_tumu(vm, vd, vs2, vl); +} + +vuint32m2_t test_vcpop_v_u32m2_tumu(vbool16_t vm, vuint32m2_t vd, + vuint32m2_t vs2, size_t vl) { + return __riscv_vcpop_tumu(vm, vd, vs2, vl); +} + +vuint32m4_t test_vcpop_v_u32m4_tumu(vbool8_t vm, vuint32m4_t vd, + vuint32m4_t vs2, size_t vl) { + return __riscv_vcpop_tumu(vm, vd, vs2, vl); +} + +vuint32m8_t test_vcpop_v_u32m8_tumu(vbool4_t vm, vuint32m8_t vd, + vuint32m8_t vs2, size_t vl) { + return __riscv_vcpop_tumu(vm, vd, vs2, vl); +} + +vuint64m1_t test_vcpop_v_u64m1_tumu(vbool64_t vm, vuint64m1_t vd, + vuint64m1_t vs2, size_t vl) { + return __riscv_vcpop_tumu(vm, vd, vs2, vl); +} + +vuint64m2_t test_vcpop_v_u64m2_tumu(vbool32_t vm, vuint64m2_t vd, + vuint64m2_t vs2, size_t vl) { + return __riscv_vcpop_tumu(vm, vd, vs2, vl); +} + +vuint64m4_t test_vcpop_v_u64m4_tumu(vbool16_t vm, vuint64m4_t vd, + vuint64m4_t vs2, size_t vl) { + return __riscv_vcpop_tumu(vm, vd, vs2, vl); +} + +vuint64m8_t test_vcpop_v_u64m8_tumu(vbool8_t vm, vuint64m8_t vd, + vuint64m8_t vs2, size_t vl) { + return __riscv_vcpop_tumu(vm, vd, vs2, vl); +} + +vuint8mf8_t test_vcpop_v_u8mf8_mu(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, + size_t vl) { + return __riscv_vcpop_mu(vm, vd, vs2, vl); +} + +vuint8mf4_t test_vcpop_v_u8mf4_mu(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, + size_t vl) { + return __riscv_vcpop_mu(vm, vd, vs2, vl); +} + +vuint8mf2_t test_vcpop_v_u8mf2_mu(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, + size_t vl) { + return __riscv_vcpop_mu(vm, vd, vs2, vl); +} + +vuint8m1_t test_vcpop_v_u8m1_mu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, + size_t vl) { + return __riscv_vcpop_mu(vm, vd, vs2, vl); +} + +vuint8m2_t test_vcpop_v_u8m2_mu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, + size_t vl) { + return __riscv_vcpop_mu(vm, vd, vs2, vl); +} + +vuint8m4_t test_vcpop_v_u8m4_mu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, + size_t vl) { + return __riscv_vcpop_mu(vm, vd, vs2, vl); +} + +vuint8m8_t test_vcpop_v_u8m8_mu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, + size_t vl) { + return __riscv_vcpop_mu(vm, vd, vs2, vl); +} + +vuint16mf4_t test_vcpop_v_u16mf4_mu(vbool64_t vm, vuint16mf4_t vd, + vuint16mf4_t vs2, size_t vl) { + return __riscv_vcpop_mu(vm, vd, vs2, vl); +} + +vuint16mf2_t test_vcpop_v_u16mf2_mu(vbool32_t vm, vuint16mf2_t vd, + vuint16mf2_t vs2, size_t vl) { + return __riscv_vcpop_mu(vm, vd, vs2, vl); +} + +vuint16m1_t test_vcpop_v_u16m1_mu(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, + size_t vl) { + return __riscv_vcpop_mu(vm, vd, vs2, vl); +} + +vuint16m2_t test_vcpop_v_u16m2_mu(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, + size_t vl) { + return __riscv_vcpop_mu(vm, vd, vs2, vl); +} + +vuint16m4_t test_vcpop_v_u16m4_mu(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, + size_t vl) { + return __riscv_vcpop_mu(vm, vd, vs2, vl); +} + +vuint16m8_t test_vcpop_v_u16m8_mu(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, + size_t vl) { + return __riscv_vcpop_mu(vm, vd, vs2, vl); +} + +vuint32mf2_t test_vcpop_v_u32mf2_mu(vbool64_t vm, vuint32mf2_t vd, + vuint32mf2_t vs2, size_t vl) { + return __riscv_vcpop_mu(vm, vd, vs2, vl); +} + +vuint32m1_t 
test_vcpop_v_u32m1_mu(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, + size_t vl) { + return __riscv_vcpop_mu(vm, vd, vs2, vl); +} + +vuint32m2_t test_vcpop_v_u32m2_mu(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, + size_t vl) { + return __riscv_vcpop_mu(vm, vd, vs2, vl); +} + +vuint32m4_t test_vcpop_v_u32m4_mu(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, + size_t vl) { + return __riscv_vcpop_mu(vm, vd, vs2, vl); +} + +vuint32m8_t test_vcpop_v_u32m8_mu(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, + size_t vl) { + return __riscv_vcpop_mu(vm, vd, vs2, vl); +} + +vuint64m1_t test_vcpop_v_u64m1_mu(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, + size_t vl) { + return __riscv_vcpop_mu(vm, vd, vs2, vl); +} + +vuint64m2_t test_vcpop_v_u64m2_mu(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, + size_t vl) { + return __riscv_vcpop_mu(vm, vd, vs2, vl); +} + +vuint64m4_t test_vcpop_v_u64m4_mu(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, + size_t vl) { + return __riscv_vcpop_mu(vm, vd, vs2, vl); +} + +vuint64m8_t test_vcpop_v_u64m8_mu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, + size_t vl) { + return __riscv_vcpop_mu(vm, vd, vs2, vl); +} diff --git a/auto-generated/vector-crypto/policy_funcs/llvm-overloaded-tests/vctz.c b/auto-generated/vector-crypto/policy_funcs/llvm-overloaded-tests/vctz.c new file mode 100644 index 000000000..8cecc11d2 --- /dev/null +++ b/auto-generated/vector-crypto/policy_funcs/llvm-overloaded-tests/vctz.c @@ -0,0 +1,434 @@ +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zvl512b \ +// RUN: -target-feature +zvbb \ +// RUN: -target-feature +zvbc \ +// RUN: -target-feature +zvkg \ +// RUN: -target-feature +zvkned \ +// RUN: -target-feature +zvknhb \ +// RUN: -target-feature +zvksed \ +// RUN: -target-feature +zvksh -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include + +vuint8mf8_t test_vctz_v_u8mf8_tu(vuint8mf8_t vd, vuint8mf8_t vs2, size_t vl) { + return __riscv_vctz_tu(vd, vs2, vl); +} + +vuint8mf4_t test_vctz_v_u8mf4_tu(vuint8mf4_t vd, vuint8mf4_t vs2, size_t vl) { + return __riscv_vctz_tu(vd, vs2, vl); +} + +vuint8mf2_t test_vctz_v_u8mf2_tu(vuint8mf2_t vd, vuint8mf2_t vs2, size_t vl) { + return __riscv_vctz_tu(vd, vs2, vl); +} + +vuint8m1_t test_vctz_v_u8m1_tu(vuint8m1_t vd, vuint8m1_t vs2, size_t vl) { + return __riscv_vctz_tu(vd, vs2, vl); +} + +vuint8m2_t test_vctz_v_u8m2_tu(vuint8m2_t vd, vuint8m2_t vs2, size_t vl) { + return __riscv_vctz_tu(vd, vs2, vl); +} + +vuint8m4_t test_vctz_v_u8m4_tu(vuint8m4_t vd, vuint8m4_t vs2, size_t vl) { + return __riscv_vctz_tu(vd, vs2, vl); +} + +vuint8m8_t test_vctz_v_u8m8_tu(vuint8m8_t vd, vuint8m8_t vs2, size_t vl) { + return __riscv_vctz_tu(vd, vs2, vl); +} + +vuint16mf4_t test_vctz_v_u16mf4_tu(vuint16mf4_t vd, vuint16mf4_t vs2, + size_t vl) { + return __riscv_vctz_tu(vd, vs2, vl); +} + +vuint16mf2_t test_vctz_v_u16mf2_tu(vuint16mf2_t vd, vuint16mf2_t vs2, + size_t vl) { + return __riscv_vctz_tu(vd, vs2, vl); +} + +vuint16m1_t test_vctz_v_u16m1_tu(vuint16m1_t vd, vuint16m1_t vs2, size_t vl) { + return __riscv_vctz_tu(vd, vs2, vl); +} + +vuint16m2_t test_vctz_v_u16m2_tu(vuint16m2_t vd, vuint16m2_t vs2, size_t vl) { + return __riscv_vctz_tu(vd, vs2, vl); +} + +vuint16m4_t test_vctz_v_u16m4_tu(vuint16m4_t vd, vuint16m4_t vs2, size_t vl) { + return __riscv_vctz_tu(vd, vs2, vl); +} + +vuint16m8_t test_vctz_v_u16m8_tu(vuint16m8_t vd, vuint16m8_t vs2, size_t vl) { + return __riscv_vctz_tu(vd, 
vs2, vl); +} + +vuint32mf2_t test_vctz_v_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, + size_t vl) { + return __riscv_vctz_tu(vd, vs2, vl); +} + +vuint32m1_t test_vctz_v_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vctz_tu(vd, vs2, vl); +} + +vuint32m2_t test_vctz_v_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vctz_tu(vd, vs2, vl); +} + +vuint32m4_t test_vctz_v_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vctz_tu(vd, vs2, vl); +} + +vuint32m8_t test_vctz_v_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { + return __riscv_vctz_tu(vd, vs2, vl); +} + +vuint64m1_t test_vctz_v_u64m1_tu(vuint64m1_t vd, vuint64m1_t vs2, size_t vl) { + return __riscv_vctz_tu(vd, vs2, vl); +} + +vuint64m2_t test_vctz_v_u64m2_tu(vuint64m2_t vd, vuint64m2_t vs2, size_t vl) { + return __riscv_vctz_tu(vd, vs2, vl); +} + +vuint64m4_t test_vctz_v_u64m4_tu(vuint64m4_t vd, vuint64m4_t vs2, size_t vl) { + return __riscv_vctz_tu(vd, vs2, vl); +} + +vuint64m8_t test_vctz_v_u64m8_tu(vuint64m8_t vd, vuint64m8_t vs2, size_t vl) { + return __riscv_vctz_tu(vd, vs2, vl); +} + +vuint8mf8_t test_vctz_v_u8mf8_tum(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, + size_t vl) { + return __riscv_vctz_tum(vm, vd, vs2, vl); +} + +vuint8mf4_t test_vctz_v_u8mf4_tum(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, + size_t vl) { + return __riscv_vctz_tum(vm, vd, vs2, vl); +} + +vuint8mf2_t test_vctz_v_u8mf2_tum(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, + size_t vl) { + return __riscv_vctz_tum(vm, vd, vs2, vl); +} + +vuint8m1_t test_vctz_v_u8m1_tum(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, + size_t vl) { + return __riscv_vctz_tum(vm, vd, vs2, vl); +} + +vuint8m2_t test_vctz_v_u8m2_tum(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, + size_t vl) { + return __riscv_vctz_tum(vm, vd, vs2, vl); +} + +vuint8m4_t test_vctz_v_u8m4_tum(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, + size_t vl) { + return __riscv_vctz_tum(vm, vd, vs2, vl); +} + +vuint8m8_t test_vctz_v_u8m8_tum(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, + size_t vl) { + return __riscv_vctz_tum(vm, vd, vs2, vl); +} + +vuint16mf4_t test_vctz_v_u16mf4_tum(vbool64_t vm, vuint16mf4_t vd, + vuint16mf4_t vs2, size_t vl) { + return __riscv_vctz_tum(vm, vd, vs2, vl); +} + +vuint16mf2_t test_vctz_v_u16mf2_tum(vbool32_t vm, vuint16mf2_t vd, + vuint16mf2_t vs2, size_t vl) { + return __riscv_vctz_tum(vm, vd, vs2, vl); +} + +vuint16m1_t test_vctz_v_u16m1_tum(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, + size_t vl) { + return __riscv_vctz_tum(vm, vd, vs2, vl); +} + +vuint16m2_t test_vctz_v_u16m2_tum(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, + size_t vl) { + return __riscv_vctz_tum(vm, vd, vs2, vl); +} + +vuint16m4_t test_vctz_v_u16m4_tum(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, + size_t vl) { + return __riscv_vctz_tum(vm, vd, vs2, vl); +} + +vuint16m8_t test_vctz_v_u16m8_tum(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, + size_t vl) { + return __riscv_vctz_tum(vm, vd, vs2, vl); +} + +vuint32mf2_t test_vctz_v_u32mf2_tum(vbool64_t vm, vuint32mf2_t vd, + vuint32mf2_t vs2, size_t vl) { + return __riscv_vctz_tum(vm, vd, vs2, vl); +} + +vuint32m1_t test_vctz_v_u32m1_tum(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, + size_t vl) { + return __riscv_vctz_tum(vm, vd, vs2, vl); +} + +vuint32m2_t test_vctz_v_u32m2_tum(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, + size_t vl) { + return __riscv_vctz_tum(vm, vd, vs2, vl); +} + +vuint32m4_t test_vctz_v_u32m4_tum(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, + size_t 
vl) { + return __riscv_vctz_tum(vm, vd, vs2, vl); +} + +vuint32m8_t test_vctz_v_u32m8_tum(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, + size_t vl) { + return __riscv_vctz_tum(vm, vd, vs2, vl); +} + +vuint64m1_t test_vctz_v_u64m1_tum(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, + size_t vl) { + return __riscv_vctz_tum(vm, vd, vs2, vl); +} + +vuint64m2_t test_vctz_v_u64m2_tum(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, + size_t vl) { + return __riscv_vctz_tum(vm, vd, vs2, vl); +} + +vuint64m4_t test_vctz_v_u64m4_tum(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, + size_t vl) { + return __riscv_vctz_tum(vm, vd, vs2, vl); +} + +vuint64m8_t test_vctz_v_u64m8_tum(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, + size_t vl) { + return __riscv_vctz_tum(vm, vd, vs2, vl); +} + +vuint8mf8_t test_vctz_v_u8mf8_tumu(vbool64_t vm, vuint8mf8_t vd, + vuint8mf8_t vs2, size_t vl) { + return __riscv_vctz_tumu(vm, vd, vs2, vl); +} + +vuint8mf4_t test_vctz_v_u8mf4_tumu(vbool32_t vm, vuint8mf4_t vd, + vuint8mf4_t vs2, size_t vl) { + return __riscv_vctz_tumu(vm, vd, vs2, vl); +} + +vuint8mf2_t test_vctz_v_u8mf2_tumu(vbool16_t vm, vuint8mf2_t vd, + vuint8mf2_t vs2, size_t vl) { + return __riscv_vctz_tumu(vm, vd, vs2, vl); +} + +vuint8m1_t test_vctz_v_u8m1_tumu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, + size_t vl) { + return __riscv_vctz_tumu(vm, vd, vs2, vl); +} + +vuint8m2_t test_vctz_v_u8m2_tumu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, + size_t vl) { + return __riscv_vctz_tumu(vm, vd, vs2, vl); +} + +vuint8m4_t test_vctz_v_u8m4_tumu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, + size_t vl) { + return __riscv_vctz_tumu(vm, vd, vs2, vl); +} + +vuint8m8_t test_vctz_v_u8m8_tumu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, + size_t vl) { + return __riscv_vctz_tumu(vm, vd, vs2, vl); +} + +vuint16mf4_t test_vctz_v_u16mf4_tumu(vbool64_t vm, vuint16mf4_t vd, + vuint16mf4_t vs2, size_t vl) { + return __riscv_vctz_tumu(vm, vd, vs2, vl); +} + +vuint16mf2_t test_vctz_v_u16mf2_tumu(vbool32_t vm, vuint16mf2_t vd, + vuint16mf2_t vs2, size_t vl) { + return __riscv_vctz_tumu(vm, vd, vs2, vl); +} + +vuint16m1_t test_vctz_v_u16m1_tumu(vbool16_t vm, vuint16m1_t vd, + vuint16m1_t vs2, size_t vl) { + return __riscv_vctz_tumu(vm, vd, vs2, vl); +} + +vuint16m2_t test_vctz_v_u16m2_tumu(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, + size_t vl) { + return __riscv_vctz_tumu(vm, vd, vs2, vl); +} + +vuint16m4_t test_vctz_v_u16m4_tumu(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, + size_t vl) { + return __riscv_vctz_tumu(vm, vd, vs2, vl); +} + +vuint16m8_t test_vctz_v_u16m8_tumu(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, + size_t vl) { + return __riscv_vctz_tumu(vm, vd, vs2, vl); +} + +vuint32mf2_t test_vctz_v_u32mf2_tumu(vbool64_t vm, vuint32mf2_t vd, + vuint32mf2_t vs2, size_t vl) { + return __riscv_vctz_tumu(vm, vd, vs2, vl); +} + +vuint32m1_t test_vctz_v_u32m1_tumu(vbool32_t vm, vuint32m1_t vd, + vuint32m1_t vs2, size_t vl) { + return __riscv_vctz_tumu(vm, vd, vs2, vl); +} + +vuint32m2_t test_vctz_v_u32m2_tumu(vbool16_t vm, vuint32m2_t vd, + vuint32m2_t vs2, size_t vl) { + return __riscv_vctz_tumu(vm, vd, vs2, vl); +} + +vuint32m4_t test_vctz_v_u32m4_tumu(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, + size_t vl) { + return __riscv_vctz_tumu(vm, vd, vs2, vl); +} + +vuint32m8_t test_vctz_v_u32m8_tumu(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, + size_t vl) { + return __riscv_vctz_tumu(vm, vd, vs2, vl); +} + +vuint64m1_t test_vctz_v_u64m1_tumu(vbool64_t vm, vuint64m1_t vd, + vuint64m1_t vs2, size_t vl) { + return 
__riscv_vctz_tumu(vm, vd, vs2, vl); +} + +vuint64m2_t test_vctz_v_u64m2_tumu(vbool32_t vm, vuint64m2_t vd, + vuint64m2_t vs2, size_t vl) { + return __riscv_vctz_tumu(vm, vd, vs2, vl); +} + +vuint64m4_t test_vctz_v_u64m4_tumu(vbool16_t vm, vuint64m4_t vd, + vuint64m4_t vs2, size_t vl) { + return __riscv_vctz_tumu(vm, vd, vs2, vl); +} + +vuint64m8_t test_vctz_v_u64m8_tumu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, + size_t vl) { + return __riscv_vctz_tumu(vm, vd, vs2, vl); +} + +vuint8mf8_t test_vctz_v_u8mf8_mu(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, + size_t vl) { + return __riscv_vctz_mu(vm, vd, vs2, vl); +} + +vuint8mf4_t test_vctz_v_u8mf4_mu(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, + size_t vl) { + return __riscv_vctz_mu(vm, vd, vs2, vl); +} + +vuint8mf2_t test_vctz_v_u8mf2_mu(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, + size_t vl) { + return __riscv_vctz_mu(vm, vd, vs2, vl); +} + +vuint8m1_t test_vctz_v_u8m1_mu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, + size_t vl) { + return __riscv_vctz_mu(vm, vd, vs2, vl); +} + +vuint8m2_t test_vctz_v_u8m2_mu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, + size_t vl) { + return __riscv_vctz_mu(vm, vd, vs2, vl); +} + +vuint8m4_t test_vctz_v_u8m4_mu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, + size_t vl) { + return __riscv_vctz_mu(vm, vd, vs2, vl); +} + +vuint8m8_t test_vctz_v_u8m8_mu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, + size_t vl) { + return __riscv_vctz_mu(vm, vd, vs2, vl); +} + +vuint16mf4_t test_vctz_v_u16mf4_mu(vbool64_t vm, vuint16mf4_t vd, + vuint16mf4_t vs2, size_t vl) { + return __riscv_vctz_mu(vm, vd, vs2, vl); +} + +vuint16mf2_t test_vctz_v_u16mf2_mu(vbool32_t vm, vuint16mf2_t vd, + vuint16mf2_t vs2, size_t vl) { + return __riscv_vctz_mu(vm, vd, vs2, vl); +} + +vuint16m1_t test_vctz_v_u16m1_mu(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, + size_t vl) { + return __riscv_vctz_mu(vm, vd, vs2, vl); +} + +vuint16m2_t test_vctz_v_u16m2_mu(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, + size_t vl) { + return __riscv_vctz_mu(vm, vd, vs2, vl); +} + +vuint16m4_t test_vctz_v_u16m4_mu(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, + size_t vl) { + return __riscv_vctz_mu(vm, vd, vs2, vl); +} + +vuint16m8_t test_vctz_v_u16m8_mu(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, + size_t vl) { + return __riscv_vctz_mu(vm, vd, vs2, vl); +} + +vuint32mf2_t test_vctz_v_u32mf2_mu(vbool64_t vm, vuint32mf2_t vd, + vuint32mf2_t vs2, size_t vl) { + return __riscv_vctz_mu(vm, vd, vs2, vl); +} + +vuint32m1_t test_vctz_v_u32m1_mu(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, + size_t vl) { + return __riscv_vctz_mu(vm, vd, vs2, vl); +} + +vuint32m2_t test_vctz_v_u32m2_mu(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, + size_t vl) { + return __riscv_vctz_mu(vm, vd, vs2, vl); +} + +vuint32m4_t test_vctz_v_u32m4_mu(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, + size_t vl) { + return __riscv_vctz_mu(vm, vd, vs2, vl); +} + +vuint32m8_t test_vctz_v_u32m8_mu(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, + size_t vl) { + return __riscv_vctz_mu(vm, vd, vs2, vl); +} + +vuint64m1_t test_vctz_v_u64m1_mu(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, + size_t vl) { + return __riscv_vctz_mu(vm, vd, vs2, vl); +} + +vuint64m2_t test_vctz_v_u64m2_mu(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, + size_t vl) { + return __riscv_vctz_mu(vm, vd, vs2, vl); +} + +vuint64m4_t test_vctz_v_u64m4_mu(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, + size_t vl) { + return __riscv_vctz_mu(vm, vd, vs2, vl); +} + +vuint64m8_t test_vctz_v_u64m8_mu(vbool8_t vm, vuint64m8_t 
vd, vuint64m8_t vs2, + size_t vl) { + return __riscv_vctz_mu(vm, vd, vs2, vl); +} diff --git a/auto-generated/vector-crypto/policy_funcs/overloaded-api-testing/vclz.c b/auto-generated/vector-crypto/policy_funcs/overloaded-api-testing/vclz.c new file mode 100644 index 000000000..2d8b78be7 --- /dev/null +++ b/auto-generated/vector-crypto/policy_funcs/overloaded-api-testing/vclz.c @@ -0,0 +1,423 @@ +#include +#include + +vuint8mf8_t test_vclz_v_u8mf8_tu(vuint8mf8_t vd, vuint8mf8_t vs2, size_t vl) { + return __riscv_vclz_tu(vd, vs2, vl); +} + +vuint8mf4_t test_vclz_v_u8mf4_tu(vuint8mf4_t vd, vuint8mf4_t vs2, size_t vl) { + return __riscv_vclz_tu(vd, vs2, vl); +} + +vuint8mf2_t test_vclz_v_u8mf2_tu(vuint8mf2_t vd, vuint8mf2_t vs2, size_t vl) { + return __riscv_vclz_tu(vd, vs2, vl); +} + +vuint8m1_t test_vclz_v_u8m1_tu(vuint8m1_t vd, vuint8m1_t vs2, size_t vl) { + return __riscv_vclz_tu(vd, vs2, vl); +} + +vuint8m2_t test_vclz_v_u8m2_tu(vuint8m2_t vd, vuint8m2_t vs2, size_t vl) { + return __riscv_vclz_tu(vd, vs2, vl); +} + +vuint8m4_t test_vclz_v_u8m4_tu(vuint8m4_t vd, vuint8m4_t vs2, size_t vl) { + return __riscv_vclz_tu(vd, vs2, vl); +} + +vuint8m8_t test_vclz_v_u8m8_tu(vuint8m8_t vd, vuint8m8_t vs2, size_t vl) { + return __riscv_vclz_tu(vd, vs2, vl); +} + +vuint16mf4_t test_vclz_v_u16mf4_tu(vuint16mf4_t vd, vuint16mf4_t vs2, + size_t vl) { + return __riscv_vclz_tu(vd, vs2, vl); +} + +vuint16mf2_t test_vclz_v_u16mf2_tu(vuint16mf2_t vd, vuint16mf2_t vs2, + size_t vl) { + return __riscv_vclz_tu(vd, vs2, vl); +} + +vuint16m1_t test_vclz_v_u16m1_tu(vuint16m1_t vd, vuint16m1_t vs2, size_t vl) { + return __riscv_vclz_tu(vd, vs2, vl); +} + +vuint16m2_t test_vclz_v_u16m2_tu(vuint16m2_t vd, vuint16m2_t vs2, size_t vl) { + return __riscv_vclz_tu(vd, vs2, vl); +} + +vuint16m4_t test_vclz_v_u16m4_tu(vuint16m4_t vd, vuint16m4_t vs2, size_t vl) { + return __riscv_vclz_tu(vd, vs2, vl); +} + +vuint16m8_t test_vclz_v_u16m8_tu(vuint16m8_t vd, vuint16m8_t vs2, size_t vl) { + return __riscv_vclz_tu(vd, vs2, vl); +} + +vuint32mf2_t test_vclz_v_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, + size_t vl) { + return __riscv_vclz_tu(vd, vs2, vl); +} + +vuint32m1_t test_vclz_v_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vclz_tu(vd, vs2, vl); +} + +vuint32m2_t test_vclz_v_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vclz_tu(vd, vs2, vl); +} + +vuint32m4_t test_vclz_v_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vclz_tu(vd, vs2, vl); +} + +vuint32m8_t test_vclz_v_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { + return __riscv_vclz_tu(vd, vs2, vl); +} + +vuint64m1_t test_vclz_v_u64m1_tu(vuint64m1_t vd, vuint64m1_t vs2, size_t vl) { + return __riscv_vclz_tu(vd, vs2, vl); +} + +vuint64m2_t test_vclz_v_u64m2_tu(vuint64m2_t vd, vuint64m2_t vs2, size_t vl) { + return __riscv_vclz_tu(vd, vs2, vl); +} + +vuint64m4_t test_vclz_v_u64m4_tu(vuint64m4_t vd, vuint64m4_t vs2, size_t vl) { + return __riscv_vclz_tu(vd, vs2, vl); +} + +vuint64m8_t test_vclz_v_u64m8_tu(vuint64m8_t vd, vuint64m8_t vs2, size_t vl) { + return __riscv_vclz_tu(vd, vs2, vl); +} + +vuint8mf8_t test_vclz_v_u8mf8_tum(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, + size_t vl) { + return __riscv_vclz_tum(vm, vd, vs2, vl); +} + +vuint8mf4_t test_vclz_v_u8mf4_tum(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, + size_t vl) { + return __riscv_vclz_tum(vm, vd, vs2, vl); +} + +vuint8mf2_t test_vclz_v_u8mf2_tum(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, + size_t vl) { + return 
__riscv_vclz_tum(vm, vd, vs2, vl); +} + +vuint8m1_t test_vclz_v_u8m1_tum(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, + size_t vl) { + return __riscv_vclz_tum(vm, vd, vs2, vl); +} + +vuint8m2_t test_vclz_v_u8m2_tum(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, + size_t vl) { + return __riscv_vclz_tum(vm, vd, vs2, vl); +} + +vuint8m4_t test_vclz_v_u8m4_tum(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, + size_t vl) { + return __riscv_vclz_tum(vm, vd, vs2, vl); +} + +vuint8m8_t test_vclz_v_u8m8_tum(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, + size_t vl) { + return __riscv_vclz_tum(vm, vd, vs2, vl); +} + +vuint16mf4_t test_vclz_v_u16mf4_tum(vbool64_t vm, vuint16mf4_t vd, + vuint16mf4_t vs2, size_t vl) { + return __riscv_vclz_tum(vm, vd, vs2, vl); +} + +vuint16mf2_t test_vclz_v_u16mf2_tum(vbool32_t vm, vuint16mf2_t vd, + vuint16mf2_t vs2, size_t vl) { + return __riscv_vclz_tum(vm, vd, vs2, vl); +} + +vuint16m1_t test_vclz_v_u16m1_tum(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, + size_t vl) { + return __riscv_vclz_tum(vm, vd, vs2, vl); +} + +vuint16m2_t test_vclz_v_u16m2_tum(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, + size_t vl) { + return __riscv_vclz_tum(vm, vd, vs2, vl); +} + +vuint16m4_t test_vclz_v_u16m4_tum(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, + size_t vl) { + return __riscv_vclz_tum(vm, vd, vs2, vl); +} + +vuint16m8_t test_vclz_v_u16m8_tum(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, + size_t vl) { + return __riscv_vclz_tum(vm, vd, vs2, vl); +} + +vuint32mf2_t test_vclz_v_u32mf2_tum(vbool64_t vm, vuint32mf2_t vd, + vuint32mf2_t vs2, size_t vl) { + return __riscv_vclz_tum(vm, vd, vs2, vl); +} + +vuint32m1_t test_vclz_v_u32m1_tum(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, + size_t vl) { + return __riscv_vclz_tum(vm, vd, vs2, vl); +} + +vuint32m2_t test_vclz_v_u32m2_tum(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, + size_t vl) { + return __riscv_vclz_tum(vm, vd, vs2, vl); +} + +vuint32m4_t test_vclz_v_u32m4_tum(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, + size_t vl) { + return __riscv_vclz_tum(vm, vd, vs2, vl); +} + +vuint32m8_t test_vclz_v_u32m8_tum(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, + size_t vl) { + return __riscv_vclz_tum(vm, vd, vs2, vl); +} + +vuint64m1_t test_vclz_v_u64m1_tum(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, + size_t vl) { + return __riscv_vclz_tum(vm, vd, vs2, vl); +} + +vuint64m2_t test_vclz_v_u64m2_tum(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, + size_t vl) { + return __riscv_vclz_tum(vm, vd, vs2, vl); +} + +vuint64m4_t test_vclz_v_u64m4_tum(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, + size_t vl) { + return __riscv_vclz_tum(vm, vd, vs2, vl); +} + +vuint64m8_t test_vclz_v_u64m8_tum(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, + size_t vl) { + return __riscv_vclz_tum(vm, vd, vs2, vl); +} + +vuint8mf8_t test_vclz_v_u8mf8_tumu(vbool64_t vm, vuint8mf8_t vd, + vuint8mf8_t vs2, size_t vl) { + return __riscv_vclz_tumu(vm, vd, vs2, vl); +} + +vuint8mf4_t test_vclz_v_u8mf4_tumu(vbool32_t vm, vuint8mf4_t vd, + vuint8mf4_t vs2, size_t vl) { + return __riscv_vclz_tumu(vm, vd, vs2, vl); +} + +vuint8mf2_t test_vclz_v_u8mf2_tumu(vbool16_t vm, vuint8mf2_t vd, + vuint8mf2_t vs2, size_t vl) { + return __riscv_vclz_tumu(vm, vd, vs2, vl); +} + +vuint8m1_t test_vclz_v_u8m1_tumu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, + size_t vl) { + return __riscv_vclz_tumu(vm, vd, vs2, vl); +} + +vuint8m2_t test_vclz_v_u8m2_tumu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, + size_t vl) { + return __riscv_vclz_tumu(vm, vd, vs2, vl); +} + +vuint8m4_t 
test_vclz_v_u8m4_tumu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, + size_t vl) { + return __riscv_vclz_tumu(vm, vd, vs2, vl); +} + +vuint8m8_t test_vclz_v_u8m8_tumu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, + size_t vl) { + return __riscv_vclz_tumu(vm, vd, vs2, vl); +} + +vuint16mf4_t test_vclz_v_u16mf4_tumu(vbool64_t vm, vuint16mf4_t vd, + vuint16mf4_t vs2, size_t vl) { + return __riscv_vclz_tumu(vm, vd, vs2, vl); +} + +vuint16mf2_t test_vclz_v_u16mf2_tumu(vbool32_t vm, vuint16mf2_t vd, + vuint16mf2_t vs2, size_t vl) { + return __riscv_vclz_tumu(vm, vd, vs2, vl); +} + +vuint16m1_t test_vclz_v_u16m1_tumu(vbool16_t vm, vuint16m1_t vd, + vuint16m1_t vs2, size_t vl) { + return __riscv_vclz_tumu(vm, vd, vs2, vl); +} + +vuint16m2_t test_vclz_v_u16m2_tumu(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, + size_t vl) { + return __riscv_vclz_tumu(vm, vd, vs2, vl); +} + +vuint16m4_t test_vclz_v_u16m4_tumu(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, + size_t vl) { + return __riscv_vclz_tumu(vm, vd, vs2, vl); +} + +vuint16m8_t test_vclz_v_u16m8_tumu(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, + size_t vl) { + return __riscv_vclz_tumu(vm, vd, vs2, vl); +} + +vuint32mf2_t test_vclz_v_u32mf2_tumu(vbool64_t vm, vuint32mf2_t vd, + vuint32mf2_t vs2, size_t vl) { + return __riscv_vclz_tumu(vm, vd, vs2, vl); +} + +vuint32m1_t test_vclz_v_u32m1_tumu(vbool32_t vm, vuint32m1_t vd, + vuint32m1_t vs2, size_t vl) { + return __riscv_vclz_tumu(vm, vd, vs2, vl); +} + +vuint32m2_t test_vclz_v_u32m2_tumu(vbool16_t vm, vuint32m2_t vd, + vuint32m2_t vs2, size_t vl) { + return __riscv_vclz_tumu(vm, vd, vs2, vl); +} + +vuint32m4_t test_vclz_v_u32m4_tumu(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, + size_t vl) { + return __riscv_vclz_tumu(vm, vd, vs2, vl); +} + +vuint32m8_t test_vclz_v_u32m8_tumu(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, + size_t vl) { + return __riscv_vclz_tumu(vm, vd, vs2, vl); +} + +vuint64m1_t test_vclz_v_u64m1_tumu(vbool64_t vm, vuint64m1_t vd, + vuint64m1_t vs2, size_t vl) { + return __riscv_vclz_tumu(vm, vd, vs2, vl); +} + +vuint64m2_t test_vclz_v_u64m2_tumu(vbool32_t vm, vuint64m2_t vd, + vuint64m2_t vs2, size_t vl) { + return __riscv_vclz_tumu(vm, vd, vs2, vl); +} + +vuint64m4_t test_vclz_v_u64m4_tumu(vbool16_t vm, vuint64m4_t vd, + vuint64m4_t vs2, size_t vl) { + return __riscv_vclz_tumu(vm, vd, vs2, vl); +} + +vuint64m8_t test_vclz_v_u64m8_tumu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, + size_t vl) { + return __riscv_vclz_tumu(vm, vd, vs2, vl); +} + +vuint8mf8_t test_vclz_v_u8mf8_mu(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, + size_t vl) { + return __riscv_vclz_mu(vm, vd, vs2, vl); +} + +vuint8mf4_t test_vclz_v_u8mf4_mu(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, + size_t vl) { + return __riscv_vclz_mu(vm, vd, vs2, vl); +} + +vuint8mf2_t test_vclz_v_u8mf2_mu(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, + size_t vl) { + return __riscv_vclz_mu(vm, vd, vs2, vl); +} + +vuint8m1_t test_vclz_v_u8m1_mu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, + size_t vl) { + return __riscv_vclz_mu(vm, vd, vs2, vl); +} + +vuint8m2_t test_vclz_v_u8m2_mu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, + size_t vl) { + return __riscv_vclz_mu(vm, vd, vs2, vl); +} + +vuint8m4_t test_vclz_v_u8m4_mu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, + size_t vl) { + return __riscv_vclz_mu(vm, vd, vs2, vl); +} + +vuint8m8_t test_vclz_v_u8m8_mu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, + size_t vl) { + return __riscv_vclz_mu(vm, vd, vs2, vl); +} + +vuint16mf4_t test_vclz_v_u16mf4_mu(vbool64_t vm, vuint16mf4_t vd, 
+ vuint16mf4_t vs2, size_t vl) { + return __riscv_vclz_mu(vm, vd, vs2, vl); +} + +vuint16mf2_t test_vclz_v_u16mf2_mu(vbool32_t vm, vuint16mf2_t vd, + vuint16mf2_t vs2, size_t vl) { + return __riscv_vclz_mu(vm, vd, vs2, vl); +} + +vuint16m1_t test_vclz_v_u16m1_mu(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, + size_t vl) { + return __riscv_vclz_mu(vm, vd, vs2, vl); +} + +vuint16m2_t test_vclz_v_u16m2_mu(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, + size_t vl) { + return __riscv_vclz_mu(vm, vd, vs2, vl); +} + +vuint16m4_t test_vclz_v_u16m4_mu(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, + size_t vl) { + return __riscv_vclz_mu(vm, vd, vs2, vl); +} + +vuint16m8_t test_vclz_v_u16m8_mu(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, + size_t vl) { + return __riscv_vclz_mu(vm, vd, vs2, vl); +} + +vuint32mf2_t test_vclz_v_u32mf2_mu(vbool64_t vm, vuint32mf2_t vd, + vuint32mf2_t vs2, size_t vl) { + return __riscv_vclz_mu(vm, vd, vs2, vl); +} + +vuint32m1_t test_vclz_v_u32m1_mu(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, + size_t vl) { + return __riscv_vclz_mu(vm, vd, vs2, vl); +} + +vuint32m2_t test_vclz_v_u32m2_mu(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, + size_t vl) { + return __riscv_vclz_mu(vm, vd, vs2, vl); +} + +vuint32m4_t test_vclz_v_u32m4_mu(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, + size_t vl) { + return __riscv_vclz_mu(vm, vd, vs2, vl); +} + +vuint32m8_t test_vclz_v_u32m8_mu(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, + size_t vl) { + return __riscv_vclz_mu(vm, vd, vs2, vl); +} + +vuint64m1_t test_vclz_v_u64m1_mu(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, + size_t vl) { + return __riscv_vclz_mu(vm, vd, vs2, vl); +} + +vuint64m2_t test_vclz_v_u64m2_mu(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, + size_t vl) { + return __riscv_vclz_mu(vm, vd, vs2, vl); +} + +vuint64m4_t test_vclz_v_u64m4_mu(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, + size_t vl) { + return __riscv_vclz_mu(vm, vd, vs2, vl); +} + +vuint64m8_t test_vclz_v_u64m8_mu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, + size_t vl) { + return __riscv_vclz_mu(vm, vd, vs2, vl); +} diff --git a/auto-generated/vector-crypto/policy_funcs/overloaded-api-testing/vcpop.c b/auto-generated/vector-crypto/policy_funcs/overloaded-api-testing/vcpop.c new file mode 100644 index 000000000..10f897107 --- /dev/null +++ b/auto-generated/vector-crypto/policy_funcs/overloaded-api-testing/vcpop.c @@ -0,0 +1,423 @@ +#include +#include + +vuint8mf8_t test_vcpop_v_u8mf8_tu(vuint8mf8_t vd, vuint8mf8_t vs2, size_t vl) { + return __riscv_vcpop_tu(vd, vs2, vl); +} + +vuint8mf4_t test_vcpop_v_u8mf4_tu(vuint8mf4_t vd, vuint8mf4_t vs2, size_t vl) { + return __riscv_vcpop_tu(vd, vs2, vl); +} + +vuint8mf2_t test_vcpop_v_u8mf2_tu(vuint8mf2_t vd, vuint8mf2_t vs2, size_t vl) { + return __riscv_vcpop_tu(vd, vs2, vl); +} + +vuint8m1_t test_vcpop_v_u8m1_tu(vuint8m1_t vd, vuint8m1_t vs2, size_t vl) { + return __riscv_vcpop_tu(vd, vs2, vl); +} + +vuint8m2_t test_vcpop_v_u8m2_tu(vuint8m2_t vd, vuint8m2_t vs2, size_t vl) { + return __riscv_vcpop_tu(vd, vs2, vl); +} + +vuint8m4_t test_vcpop_v_u8m4_tu(vuint8m4_t vd, vuint8m4_t vs2, size_t vl) { + return __riscv_vcpop_tu(vd, vs2, vl); +} + +vuint8m8_t test_vcpop_v_u8m8_tu(vuint8m8_t vd, vuint8m8_t vs2, size_t vl) { + return __riscv_vcpop_tu(vd, vs2, vl); +} + +vuint16mf4_t test_vcpop_v_u16mf4_tu(vuint16mf4_t vd, vuint16mf4_t vs2, + size_t vl) { + return __riscv_vcpop_tu(vd, vs2, vl); +} + +vuint16mf2_t test_vcpop_v_u16mf2_tu(vuint16mf2_t vd, vuint16mf2_t vs2, + size_t vl) { + return __riscv_vcpop_tu(vd, 
vs2, vl); +} + +vuint16m1_t test_vcpop_v_u16m1_tu(vuint16m1_t vd, vuint16m1_t vs2, size_t vl) { + return __riscv_vcpop_tu(vd, vs2, vl); +} + +vuint16m2_t test_vcpop_v_u16m2_tu(vuint16m2_t vd, vuint16m2_t vs2, size_t vl) { + return __riscv_vcpop_tu(vd, vs2, vl); +} + +vuint16m4_t test_vcpop_v_u16m4_tu(vuint16m4_t vd, vuint16m4_t vs2, size_t vl) { + return __riscv_vcpop_tu(vd, vs2, vl); +} + +vuint16m8_t test_vcpop_v_u16m8_tu(vuint16m8_t vd, vuint16m8_t vs2, size_t vl) { + return __riscv_vcpop_tu(vd, vs2, vl); +} + +vuint32mf2_t test_vcpop_v_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, + size_t vl) { + return __riscv_vcpop_tu(vd, vs2, vl); +} + +vuint32m1_t test_vcpop_v_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vcpop_tu(vd, vs2, vl); +} + +vuint32m2_t test_vcpop_v_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vcpop_tu(vd, vs2, vl); +} + +vuint32m4_t test_vcpop_v_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vcpop_tu(vd, vs2, vl); +} + +vuint32m8_t test_vcpop_v_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { + return __riscv_vcpop_tu(vd, vs2, vl); +} + +vuint64m1_t test_vcpop_v_u64m1_tu(vuint64m1_t vd, vuint64m1_t vs2, size_t vl) { + return __riscv_vcpop_tu(vd, vs2, vl); +} + +vuint64m2_t test_vcpop_v_u64m2_tu(vuint64m2_t vd, vuint64m2_t vs2, size_t vl) { + return __riscv_vcpop_tu(vd, vs2, vl); +} + +vuint64m4_t test_vcpop_v_u64m4_tu(vuint64m4_t vd, vuint64m4_t vs2, size_t vl) { + return __riscv_vcpop_tu(vd, vs2, vl); +} + +vuint64m8_t test_vcpop_v_u64m8_tu(vuint64m8_t vd, vuint64m8_t vs2, size_t vl) { + return __riscv_vcpop_tu(vd, vs2, vl); +} + +vuint8mf8_t test_vcpop_v_u8mf8_tum(vbool64_t vm, vuint8mf8_t vd, + vuint8mf8_t vs2, size_t vl) { + return __riscv_vcpop_tum(vm, vd, vs2, vl); +} + +vuint8mf4_t test_vcpop_v_u8mf4_tum(vbool32_t vm, vuint8mf4_t vd, + vuint8mf4_t vs2, size_t vl) { + return __riscv_vcpop_tum(vm, vd, vs2, vl); +} + +vuint8mf2_t test_vcpop_v_u8mf2_tum(vbool16_t vm, vuint8mf2_t vd, + vuint8mf2_t vs2, size_t vl) { + return __riscv_vcpop_tum(vm, vd, vs2, vl); +} + +vuint8m1_t test_vcpop_v_u8m1_tum(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, + size_t vl) { + return __riscv_vcpop_tum(vm, vd, vs2, vl); +} + +vuint8m2_t test_vcpop_v_u8m2_tum(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, + size_t vl) { + return __riscv_vcpop_tum(vm, vd, vs2, vl); +} + +vuint8m4_t test_vcpop_v_u8m4_tum(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, + size_t vl) { + return __riscv_vcpop_tum(vm, vd, vs2, vl); +} + +vuint8m8_t test_vcpop_v_u8m8_tum(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, + size_t vl) { + return __riscv_vcpop_tum(vm, vd, vs2, vl); +} + +vuint16mf4_t test_vcpop_v_u16mf4_tum(vbool64_t vm, vuint16mf4_t vd, + vuint16mf4_t vs2, size_t vl) { + return __riscv_vcpop_tum(vm, vd, vs2, vl); +} + +vuint16mf2_t test_vcpop_v_u16mf2_tum(vbool32_t vm, vuint16mf2_t vd, + vuint16mf2_t vs2, size_t vl) { + return __riscv_vcpop_tum(vm, vd, vs2, vl); +} + +vuint16m1_t test_vcpop_v_u16m1_tum(vbool16_t vm, vuint16m1_t vd, + vuint16m1_t vs2, size_t vl) { + return __riscv_vcpop_tum(vm, vd, vs2, vl); +} + +vuint16m2_t test_vcpop_v_u16m2_tum(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, + size_t vl) { + return __riscv_vcpop_tum(vm, vd, vs2, vl); +} + +vuint16m4_t test_vcpop_v_u16m4_tum(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, + size_t vl) { + return __riscv_vcpop_tum(vm, vd, vs2, vl); +} + +vuint16m8_t test_vcpop_v_u16m8_tum(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, + size_t vl) { + return __riscv_vcpop_tum(vm, vd, 
vs2, vl); +} + +vuint32mf2_t test_vcpop_v_u32mf2_tum(vbool64_t vm, vuint32mf2_t vd, + vuint32mf2_t vs2, size_t vl) { + return __riscv_vcpop_tum(vm, vd, vs2, vl); +} + +vuint32m1_t test_vcpop_v_u32m1_tum(vbool32_t vm, vuint32m1_t vd, + vuint32m1_t vs2, size_t vl) { + return __riscv_vcpop_tum(vm, vd, vs2, vl); +} + +vuint32m2_t test_vcpop_v_u32m2_tum(vbool16_t vm, vuint32m2_t vd, + vuint32m2_t vs2, size_t vl) { + return __riscv_vcpop_tum(vm, vd, vs2, vl); +} + +vuint32m4_t test_vcpop_v_u32m4_tum(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, + size_t vl) { + return __riscv_vcpop_tum(vm, vd, vs2, vl); +} + +vuint32m8_t test_vcpop_v_u32m8_tum(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, + size_t vl) { + return __riscv_vcpop_tum(vm, vd, vs2, vl); +} + +vuint64m1_t test_vcpop_v_u64m1_tum(vbool64_t vm, vuint64m1_t vd, + vuint64m1_t vs2, size_t vl) { + return __riscv_vcpop_tum(vm, vd, vs2, vl); +} + +vuint64m2_t test_vcpop_v_u64m2_tum(vbool32_t vm, vuint64m2_t vd, + vuint64m2_t vs2, size_t vl) { + return __riscv_vcpop_tum(vm, vd, vs2, vl); +} + +vuint64m4_t test_vcpop_v_u64m4_tum(vbool16_t vm, vuint64m4_t vd, + vuint64m4_t vs2, size_t vl) { + return __riscv_vcpop_tum(vm, vd, vs2, vl); +} + +vuint64m8_t test_vcpop_v_u64m8_tum(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, + size_t vl) { + return __riscv_vcpop_tum(vm, vd, vs2, vl); +} + +vuint8mf8_t test_vcpop_v_u8mf8_tumu(vbool64_t vm, vuint8mf8_t vd, + vuint8mf8_t vs2, size_t vl) { + return __riscv_vcpop_tumu(vm, vd, vs2, vl); +} + +vuint8mf4_t test_vcpop_v_u8mf4_tumu(vbool32_t vm, vuint8mf4_t vd, + vuint8mf4_t vs2, size_t vl) { + return __riscv_vcpop_tumu(vm, vd, vs2, vl); +} + +vuint8mf2_t test_vcpop_v_u8mf2_tumu(vbool16_t vm, vuint8mf2_t vd, + vuint8mf2_t vs2, size_t vl) { + return __riscv_vcpop_tumu(vm, vd, vs2, vl); +} + +vuint8m1_t test_vcpop_v_u8m1_tumu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, + size_t vl) { + return __riscv_vcpop_tumu(vm, vd, vs2, vl); +} + +vuint8m2_t test_vcpop_v_u8m2_tumu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, + size_t vl) { + return __riscv_vcpop_tumu(vm, vd, vs2, vl); +} + +vuint8m4_t test_vcpop_v_u8m4_tumu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, + size_t vl) { + return __riscv_vcpop_tumu(vm, vd, vs2, vl); +} + +vuint8m8_t test_vcpop_v_u8m8_tumu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, + size_t vl) { + return __riscv_vcpop_tumu(vm, vd, vs2, vl); +} + +vuint16mf4_t test_vcpop_v_u16mf4_tumu(vbool64_t vm, vuint16mf4_t vd, + vuint16mf4_t vs2, size_t vl) { + return __riscv_vcpop_tumu(vm, vd, vs2, vl); +} + +vuint16mf2_t test_vcpop_v_u16mf2_tumu(vbool32_t vm, vuint16mf2_t vd, + vuint16mf2_t vs2, size_t vl) { + return __riscv_vcpop_tumu(vm, vd, vs2, vl); +} + +vuint16m1_t test_vcpop_v_u16m1_tumu(vbool16_t vm, vuint16m1_t vd, + vuint16m1_t vs2, size_t vl) { + return __riscv_vcpop_tumu(vm, vd, vs2, vl); +} + +vuint16m2_t test_vcpop_v_u16m2_tumu(vbool8_t vm, vuint16m2_t vd, + vuint16m2_t vs2, size_t vl) { + return __riscv_vcpop_tumu(vm, vd, vs2, vl); +} + +vuint16m4_t test_vcpop_v_u16m4_tumu(vbool4_t vm, vuint16m4_t vd, + vuint16m4_t vs2, size_t vl) { + return __riscv_vcpop_tumu(vm, vd, vs2, vl); +} + +vuint16m8_t test_vcpop_v_u16m8_tumu(vbool2_t vm, vuint16m8_t vd, + vuint16m8_t vs2, size_t vl) { + return __riscv_vcpop_tumu(vm, vd, vs2, vl); +} + +vuint32mf2_t test_vcpop_v_u32mf2_tumu(vbool64_t vm, vuint32mf2_t vd, + vuint32mf2_t vs2, size_t vl) { + return __riscv_vcpop_tumu(vm, vd, vs2, vl); +} + +vuint32m1_t test_vcpop_v_u32m1_tumu(vbool32_t vm, vuint32m1_t vd, + vuint32m1_t vs2, size_t vl) { + return 
__riscv_vcpop_tumu(vm, vd, vs2, vl); +} + +vuint32m2_t test_vcpop_v_u32m2_tumu(vbool16_t vm, vuint32m2_t vd, + vuint32m2_t vs2, size_t vl) { + return __riscv_vcpop_tumu(vm, vd, vs2, vl); +} + +vuint32m4_t test_vcpop_v_u32m4_tumu(vbool8_t vm, vuint32m4_t vd, + vuint32m4_t vs2, size_t vl) { + return __riscv_vcpop_tumu(vm, vd, vs2, vl); +} + +vuint32m8_t test_vcpop_v_u32m8_tumu(vbool4_t vm, vuint32m8_t vd, + vuint32m8_t vs2, size_t vl) { + return __riscv_vcpop_tumu(vm, vd, vs2, vl); +} + +vuint64m1_t test_vcpop_v_u64m1_tumu(vbool64_t vm, vuint64m1_t vd, + vuint64m1_t vs2, size_t vl) { + return __riscv_vcpop_tumu(vm, vd, vs2, vl); +} + +vuint64m2_t test_vcpop_v_u64m2_tumu(vbool32_t vm, vuint64m2_t vd, + vuint64m2_t vs2, size_t vl) { + return __riscv_vcpop_tumu(vm, vd, vs2, vl); +} + +vuint64m4_t test_vcpop_v_u64m4_tumu(vbool16_t vm, vuint64m4_t vd, + vuint64m4_t vs2, size_t vl) { + return __riscv_vcpop_tumu(vm, vd, vs2, vl); +} + +vuint64m8_t test_vcpop_v_u64m8_tumu(vbool8_t vm, vuint64m8_t vd, + vuint64m8_t vs2, size_t vl) { + return __riscv_vcpop_tumu(vm, vd, vs2, vl); +} + +vuint8mf8_t test_vcpop_v_u8mf8_mu(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, + size_t vl) { + return __riscv_vcpop_mu(vm, vd, vs2, vl); +} + +vuint8mf4_t test_vcpop_v_u8mf4_mu(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, + size_t vl) { + return __riscv_vcpop_mu(vm, vd, vs2, vl); +} + +vuint8mf2_t test_vcpop_v_u8mf2_mu(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, + size_t vl) { + return __riscv_vcpop_mu(vm, vd, vs2, vl); +} + +vuint8m1_t test_vcpop_v_u8m1_mu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, + size_t vl) { + return __riscv_vcpop_mu(vm, vd, vs2, vl); +} + +vuint8m2_t test_vcpop_v_u8m2_mu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, + size_t vl) { + return __riscv_vcpop_mu(vm, vd, vs2, vl); +} + +vuint8m4_t test_vcpop_v_u8m4_mu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, + size_t vl) { + return __riscv_vcpop_mu(vm, vd, vs2, vl); +} + +vuint8m8_t test_vcpop_v_u8m8_mu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, + size_t vl) { + return __riscv_vcpop_mu(vm, vd, vs2, vl); +} + +vuint16mf4_t test_vcpop_v_u16mf4_mu(vbool64_t vm, vuint16mf4_t vd, + vuint16mf4_t vs2, size_t vl) { + return __riscv_vcpop_mu(vm, vd, vs2, vl); +} + +vuint16mf2_t test_vcpop_v_u16mf2_mu(vbool32_t vm, vuint16mf2_t vd, + vuint16mf2_t vs2, size_t vl) { + return __riscv_vcpop_mu(vm, vd, vs2, vl); +} + +vuint16m1_t test_vcpop_v_u16m1_mu(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, + size_t vl) { + return __riscv_vcpop_mu(vm, vd, vs2, vl); +} + +vuint16m2_t test_vcpop_v_u16m2_mu(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, + size_t vl) { + return __riscv_vcpop_mu(vm, vd, vs2, vl); +} + +vuint16m4_t test_vcpop_v_u16m4_mu(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, + size_t vl) { + return __riscv_vcpop_mu(vm, vd, vs2, vl); +} + +vuint16m8_t test_vcpop_v_u16m8_mu(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, + size_t vl) { + return __riscv_vcpop_mu(vm, vd, vs2, vl); +} + +vuint32mf2_t test_vcpop_v_u32mf2_mu(vbool64_t vm, vuint32mf2_t vd, + vuint32mf2_t vs2, size_t vl) { + return __riscv_vcpop_mu(vm, vd, vs2, vl); +} + +vuint32m1_t test_vcpop_v_u32m1_mu(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, + size_t vl) { + return __riscv_vcpop_mu(vm, vd, vs2, vl); +} + +vuint32m2_t test_vcpop_v_u32m2_mu(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, + size_t vl) { + return __riscv_vcpop_mu(vm, vd, vs2, vl); +} + +vuint32m4_t test_vcpop_v_u32m4_mu(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, + size_t vl) { + return __riscv_vcpop_mu(vm, vd, vs2, vl); 
+} + +vuint32m8_t test_vcpop_v_u32m8_mu(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, + size_t vl) { + return __riscv_vcpop_mu(vm, vd, vs2, vl); +} + +vuint64m1_t test_vcpop_v_u64m1_mu(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, + size_t vl) { + return __riscv_vcpop_mu(vm, vd, vs2, vl); +} + +vuint64m2_t test_vcpop_v_u64m2_mu(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, + size_t vl) { + return __riscv_vcpop_mu(vm, vd, vs2, vl); +} + +vuint64m4_t test_vcpop_v_u64m4_mu(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, + size_t vl) { + return __riscv_vcpop_mu(vm, vd, vs2, vl); +} + +vuint64m8_t test_vcpop_v_u64m8_mu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, + size_t vl) { + return __riscv_vcpop_mu(vm, vd, vs2, vl); +} diff --git a/auto-generated/vector-crypto/policy_funcs/overloaded-api-testing/vctz.c b/auto-generated/vector-crypto/policy_funcs/overloaded-api-testing/vctz.c new file mode 100644 index 000000000..3e0bce679 --- /dev/null +++ b/auto-generated/vector-crypto/policy_funcs/overloaded-api-testing/vctz.c @@ -0,0 +1,423 @@ +#include +#include + +vuint8mf8_t test_vctz_v_u8mf8_tu(vuint8mf8_t vd, vuint8mf8_t vs2, size_t vl) { + return __riscv_vctz_tu(vd, vs2, vl); +} + +vuint8mf4_t test_vctz_v_u8mf4_tu(vuint8mf4_t vd, vuint8mf4_t vs2, size_t vl) { + return __riscv_vctz_tu(vd, vs2, vl); +} + +vuint8mf2_t test_vctz_v_u8mf2_tu(vuint8mf2_t vd, vuint8mf2_t vs2, size_t vl) { + return __riscv_vctz_tu(vd, vs2, vl); +} + +vuint8m1_t test_vctz_v_u8m1_tu(vuint8m1_t vd, vuint8m1_t vs2, size_t vl) { + return __riscv_vctz_tu(vd, vs2, vl); +} + +vuint8m2_t test_vctz_v_u8m2_tu(vuint8m2_t vd, vuint8m2_t vs2, size_t vl) { + return __riscv_vctz_tu(vd, vs2, vl); +} + +vuint8m4_t test_vctz_v_u8m4_tu(vuint8m4_t vd, vuint8m4_t vs2, size_t vl) { + return __riscv_vctz_tu(vd, vs2, vl); +} + +vuint8m8_t test_vctz_v_u8m8_tu(vuint8m8_t vd, vuint8m8_t vs2, size_t vl) { + return __riscv_vctz_tu(vd, vs2, vl); +} + +vuint16mf4_t test_vctz_v_u16mf4_tu(vuint16mf4_t vd, vuint16mf4_t vs2, + size_t vl) { + return __riscv_vctz_tu(vd, vs2, vl); +} + +vuint16mf2_t test_vctz_v_u16mf2_tu(vuint16mf2_t vd, vuint16mf2_t vs2, + size_t vl) { + return __riscv_vctz_tu(vd, vs2, vl); +} + +vuint16m1_t test_vctz_v_u16m1_tu(vuint16m1_t vd, vuint16m1_t vs2, size_t vl) { + return __riscv_vctz_tu(vd, vs2, vl); +} + +vuint16m2_t test_vctz_v_u16m2_tu(vuint16m2_t vd, vuint16m2_t vs2, size_t vl) { + return __riscv_vctz_tu(vd, vs2, vl); +} + +vuint16m4_t test_vctz_v_u16m4_tu(vuint16m4_t vd, vuint16m4_t vs2, size_t vl) { + return __riscv_vctz_tu(vd, vs2, vl); +} + +vuint16m8_t test_vctz_v_u16m8_tu(vuint16m8_t vd, vuint16m8_t vs2, size_t vl) { + return __riscv_vctz_tu(vd, vs2, vl); +} + +vuint32mf2_t test_vctz_v_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2, + size_t vl) { + return __riscv_vctz_tu(vd, vs2, vl); +} + +vuint32m1_t test_vctz_v_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) { + return __riscv_vctz_tu(vd, vs2, vl); +} + +vuint32m2_t test_vctz_v_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) { + return __riscv_vctz_tu(vd, vs2, vl); +} + +vuint32m4_t test_vctz_v_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) { + return __riscv_vctz_tu(vd, vs2, vl); +} + +vuint32m8_t test_vctz_v_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) { + return __riscv_vctz_tu(vd, vs2, vl); +} + +vuint64m1_t test_vctz_v_u64m1_tu(vuint64m1_t vd, vuint64m1_t vs2, size_t vl) { + return __riscv_vctz_tu(vd, vs2, vl); +} + +vuint64m2_t test_vctz_v_u64m2_tu(vuint64m2_t vd, vuint64m2_t vs2, size_t vl) { + return __riscv_vctz_tu(vd, vs2, vl); +} + 
+vuint64m4_t test_vctz_v_u64m4_tu(vuint64m4_t vd, vuint64m4_t vs2, size_t vl) { + return __riscv_vctz_tu(vd, vs2, vl); +} + +vuint64m8_t test_vctz_v_u64m8_tu(vuint64m8_t vd, vuint64m8_t vs2, size_t vl) { + return __riscv_vctz_tu(vd, vs2, vl); +} + +vuint8mf8_t test_vctz_v_u8mf8_tum(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, + size_t vl) { + return __riscv_vctz_tum(vm, vd, vs2, vl); +} + +vuint8mf4_t test_vctz_v_u8mf4_tum(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, + size_t vl) { + return __riscv_vctz_tum(vm, vd, vs2, vl); +} + +vuint8mf2_t test_vctz_v_u8mf2_tum(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, + size_t vl) { + return __riscv_vctz_tum(vm, vd, vs2, vl); +} + +vuint8m1_t test_vctz_v_u8m1_tum(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, + size_t vl) { + return __riscv_vctz_tum(vm, vd, vs2, vl); +} + +vuint8m2_t test_vctz_v_u8m2_tum(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, + size_t vl) { + return __riscv_vctz_tum(vm, vd, vs2, vl); +} + +vuint8m4_t test_vctz_v_u8m4_tum(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, + size_t vl) { + return __riscv_vctz_tum(vm, vd, vs2, vl); +} + +vuint8m8_t test_vctz_v_u8m8_tum(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, + size_t vl) { + return __riscv_vctz_tum(vm, vd, vs2, vl); +} + +vuint16mf4_t test_vctz_v_u16mf4_tum(vbool64_t vm, vuint16mf4_t vd, + vuint16mf4_t vs2, size_t vl) { + return __riscv_vctz_tum(vm, vd, vs2, vl); +} + +vuint16mf2_t test_vctz_v_u16mf2_tum(vbool32_t vm, vuint16mf2_t vd, + vuint16mf2_t vs2, size_t vl) { + return __riscv_vctz_tum(vm, vd, vs2, vl); +} + +vuint16m1_t test_vctz_v_u16m1_tum(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, + size_t vl) { + return __riscv_vctz_tum(vm, vd, vs2, vl); +} + +vuint16m2_t test_vctz_v_u16m2_tum(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, + size_t vl) { + return __riscv_vctz_tum(vm, vd, vs2, vl); +} + +vuint16m4_t test_vctz_v_u16m4_tum(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, + size_t vl) { + return __riscv_vctz_tum(vm, vd, vs2, vl); +} + +vuint16m8_t test_vctz_v_u16m8_tum(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, + size_t vl) { + return __riscv_vctz_tum(vm, vd, vs2, vl); +} + +vuint32mf2_t test_vctz_v_u32mf2_tum(vbool64_t vm, vuint32mf2_t vd, + vuint32mf2_t vs2, size_t vl) { + return __riscv_vctz_tum(vm, vd, vs2, vl); +} + +vuint32m1_t test_vctz_v_u32m1_tum(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, + size_t vl) { + return __riscv_vctz_tum(vm, vd, vs2, vl); +} + +vuint32m2_t test_vctz_v_u32m2_tum(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, + size_t vl) { + return __riscv_vctz_tum(vm, vd, vs2, vl); +} + +vuint32m4_t test_vctz_v_u32m4_tum(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, + size_t vl) { + return __riscv_vctz_tum(vm, vd, vs2, vl); +} + +vuint32m8_t test_vctz_v_u32m8_tum(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, + size_t vl) { + return __riscv_vctz_tum(vm, vd, vs2, vl); +} + +vuint64m1_t test_vctz_v_u64m1_tum(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, + size_t vl) { + return __riscv_vctz_tum(vm, vd, vs2, vl); +} + +vuint64m2_t test_vctz_v_u64m2_tum(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, + size_t vl) { + return __riscv_vctz_tum(vm, vd, vs2, vl); +} + +vuint64m4_t test_vctz_v_u64m4_tum(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, + size_t vl) { + return __riscv_vctz_tum(vm, vd, vs2, vl); +} + +vuint64m8_t test_vctz_v_u64m8_tum(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, + size_t vl) { + return __riscv_vctz_tum(vm, vd, vs2, vl); +} + +vuint8mf8_t test_vctz_v_u8mf8_tumu(vbool64_t vm, vuint8mf8_t vd, + vuint8mf8_t vs2, size_t vl) { + return 
__riscv_vctz_tumu(vm, vd, vs2, vl); +} + +vuint8mf4_t test_vctz_v_u8mf4_tumu(vbool32_t vm, vuint8mf4_t vd, + vuint8mf4_t vs2, size_t vl) { + return __riscv_vctz_tumu(vm, vd, vs2, vl); +} + +vuint8mf2_t test_vctz_v_u8mf2_tumu(vbool16_t vm, vuint8mf2_t vd, + vuint8mf2_t vs2, size_t vl) { + return __riscv_vctz_tumu(vm, vd, vs2, vl); +} + +vuint8m1_t test_vctz_v_u8m1_tumu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, + size_t vl) { + return __riscv_vctz_tumu(vm, vd, vs2, vl); +} + +vuint8m2_t test_vctz_v_u8m2_tumu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, + size_t vl) { + return __riscv_vctz_tumu(vm, vd, vs2, vl); +} + +vuint8m4_t test_vctz_v_u8m4_tumu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, + size_t vl) { + return __riscv_vctz_tumu(vm, vd, vs2, vl); +} + +vuint8m8_t test_vctz_v_u8m8_tumu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, + size_t vl) { + return __riscv_vctz_tumu(vm, vd, vs2, vl); +} + +vuint16mf4_t test_vctz_v_u16mf4_tumu(vbool64_t vm, vuint16mf4_t vd, + vuint16mf4_t vs2, size_t vl) { + return __riscv_vctz_tumu(vm, vd, vs2, vl); +} + +vuint16mf2_t test_vctz_v_u16mf2_tumu(vbool32_t vm, vuint16mf2_t vd, + vuint16mf2_t vs2, size_t vl) { + return __riscv_vctz_tumu(vm, vd, vs2, vl); +} + +vuint16m1_t test_vctz_v_u16m1_tumu(vbool16_t vm, vuint16m1_t vd, + vuint16m1_t vs2, size_t vl) { + return __riscv_vctz_tumu(vm, vd, vs2, vl); +} + +vuint16m2_t test_vctz_v_u16m2_tumu(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, + size_t vl) { + return __riscv_vctz_tumu(vm, vd, vs2, vl); +} + +vuint16m4_t test_vctz_v_u16m4_tumu(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, + size_t vl) { + return __riscv_vctz_tumu(vm, vd, vs2, vl); +} + +vuint16m8_t test_vctz_v_u16m8_tumu(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, + size_t vl) { + return __riscv_vctz_tumu(vm, vd, vs2, vl); +} + +vuint32mf2_t test_vctz_v_u32mf2_tumu(vbool64_t vm, vuint32mf2_t vd, + vuint32mf2_t vs2, size_t vl) { + return __riscv_vctz_tumu(vm, vd, vs2, vl); +} + +vuint32m1_t test_vctz_v_u32m1_tumu(vbool32_t vm, vuint32m1_t vd, + vuint32m1_t vs2, size_t vl) { + return __riscv_vctz_tumu(vm, vd, vs2, vl); +} + +vuint32m2_t test_vctz_v_u32m2_tumu(vbool16_t vm, vuint32m2_t vd, + vuint32m2_t vs2, size_t vl) { + return __riscv_vctz_tumu(vm, vd, vs2, vl); +} + +vuint32m4_t test_vctz_v_u32m4_tumu(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, + size_t vl) { + return __riscv_vctz_tumu(vm, vd, vs2, vl); +} + +vuint32m8_t test_vctz_v_u32m8_tumu(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, + size_t vl) { + return __riscv_vctz_tumu(vm, vd, vs2, vl); +} + +vuint64m1_t test_vctz_v_u64m1_tumu(vbool64_t vm, vuint64m1_t vd, + vuint64m1_t vs2, size_t vl) { + return __riscv_vctz_tumu(vm, vd, vs2, vl); +} + +vuint64m2_t test_vctz_v_u64m2_tumu(vbool32_t vm, vuint64m2_t vd, + vuint64m2_t vs2, size_t vl) { + return __riscv_vctz_tumu(vm, vd, vs2, vl); +} + +vuint64m4_t test_vctz_v_u64m4_tumu(vbool16_t vm, vuint64m4_t vd, + vuint64m4_t vs2, size_t vl) { + return __riscv_vctz_tumu(vm, vd, vs2, vl); +} + +vuint64m8_t test_vctz_v_u64m8_tumu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, + size_t vl) { + return __riscv_vctz_tumu(vm, vd, vs2, vl); +} + +vuint8mf8_t test_vctz_v_u8mf8_mu(vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, + size_t vl) { + return __riscv_vctz_mu(vm, vd, vs2, vl); +} + +vuint8mf4_t test_vctz_v_u8mf4_mu(vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, + size_t vl) { + return __riscv_vctz_mu(vm, vd, vs2, vl); +} + +vuint8mf2_t test_vctz_v_u8mf2_mu(vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, + size_t vl) { + return __riscv_vctz_mu(vm, vd, 
vs2, vl); +} + +vuint8m1_t test_vctz_v_u8m1_mu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, + size_t vl) { + return __riscv_vctz_mu(vm, vd, vs2, vl); +} + +vuint8m2_t test_vctz_v_u8m2_mu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, + size_t vl) { + return __riscv_vctz_mu(vm, vd, vs2, vl); +} + +vuint8m4_t test_vctz_v_u8m4_mu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, + size_t vl) { + return __riscv_vctz_mu(vm, vd, vs2, vl); +} + +vuint8m8_t test_vctz_v_u8m8_mu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, + size_t vl) { + return __riscv_vctz_mu(vm, vd, vs2, vl); +} + +vuint16mf4_t test_vctz_v_u16mf4_mu(vbool64_t vm, vuint16mf4_t vd, + vuint16mf4_t vs2, size_t vl) { + return __riscv_vctz_mu(vm, vd, vs2, vl); +} + +vuint16mf2_t test_vctz_v_u16mf2_mu(vbool32_t vm, vuint16mf2_t vd, + vuint16mf2_t vs2, size_t vl) { + return __riscv_vctz_mu(vm, vd, vs2, vl); +} + +vuint16m1_t test_vctz_v_u16m1_mu(vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, + size_t vl) { + return __riscv_vctz_mu(vm, vd, vs2, vl); +} + +vuint16m2_t test_vctz_v_u16m2_mu(vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, + size_t vl) { + return __riscv_vctz_mu(vm, vd, vs2, vl); +} + +vuint16m4_t test_vctz_v_u16m4_mu(vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, + size_t vl) { + return __riscv_vctz_mu(vm, vd, vs2, vl); +} + +vuint16m8_t test_vctz_v_u16m8_mu(vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, + size_t vl) { + return __riscv_vctz_mu(vm, vd, vs2, vl); +} + +vuint32mf2_t test_vctz_v_u32mf2_mu(vbool64_t vm, vuint32mf2_t vd, + vuint32mf2_t vs2, size_t vl) { + return __riscv_vctz_mu(vm, vd, vs2, vl); +} + +vuint32m1_t test_vctz_v_u32m1_mu(vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, + size_t vl) { + return __riscv_vctz_mu(vm, vd, vs2, vl); +} + +vuint32m2_t test_vctz_v_u32m2_mu(vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, + size_t vl) { + return __riscv_vctz_mu(vm, vd, vs2, vl); +} + +vuint32m4_t test_vctz_v_u32m4_mu(vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, + size_t vl) { + return __riscv_vctz_mu(vm, vd, vs2, vl); +} + +vuint32m8_t test_vctz_v_u32m8_mu(vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, + size_t vl) { + return __riscv_vctz_mu(vm, vd, vs2, vl); +} + +vuint64m1_t test_vctz_v_u64m1_mu(vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, + size_t vl) { + return __riscv_vctz_mu(vm, vd, vs2, vl); +} + +vuint64m2_t test_vctz_v_u64m2_mu(vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, + size_t vl) { + return __riscv_vctz_mu(vm, vd, vs2, vl); +} + +vuint64m4_t test_vctz_v_u64m4_mu(vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, + size_t vl) { + return __riscv_vctz_mu(vm, vd, vs2, vl); +} + +vuint64m8_t test_vctz_v_u64m8_mu(vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, + size_t vl) { + return __riscv_vctz_mu(vm, vd, vs2, vl); +} diff --git a/auto-generated/vector-crypto/policy_funcs/overloaded_intrinsic_funcs.md b/auto-generated/vector-crypto/policy_funcs/overloaded_intrinsic_funcs.md index 559a5b80c..f2f92ae9f 100644 --- a/auto-generated/vector-crypto/policy_funcs/overloaded_intrinsic_funcs.md +++ b/auto-generated/vector-crypto/policy_funcs/overloaded_intrinsic_funcs.md @@ -463,7 +463,287 @@ vuint64m8_t __riscv_vrev8_mu (vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, size [[policy-variant-overloaded]] ==== Vector Basic Bit-manipulation - Count Bits -Intrinsics here don't have a policy variant. 
+ +[,c] +---- +vuint8mf8_t __riscv_vclz_tu (vuint8mf8_t vd, vuint8mf8_t vs2, size_t vl); +vuint8mf4_t __riscv_vclz_tu (vuint8mf4_t vd, vuint8mf4_t vs2, size_t vl); +vuint8mf2_t __riscv_vclz_tu (vuint8mf2_t vd, vuint8mf2_t vs2, size_t vl); +vuint8m1_t __riscv_vclz_tu (vuint8m1_t vd, vuint8m1_t vs2, size_t vl); +vuint8m2_t __riscv_vclz_tu (vuint8m2_t vd, vuint8m2_t vs2, size_t vl); +vuint8m4_t __riscv_vclz_tu (vuint8m4_t vd, vuint8m4_t vs2, size_t vl); +vuint8m8_t __riscv_vclz_tu (vuint8m8_t vd, vuint8m8_t vs2, size_t vl); +vuint16mf4_t __riscv_vclz_tu (vuint16mf4_t vd, vuint16mf4_t vs2, size_t vl); +vuint16mf2_t __riscv_vclz_tu (vuint16mf2_t vd, vuint16mf2_t vs2, size_t vl); +vuint16m1_t __riscv_vclz_tu (vuint16m1_t vd, vuint16m1_t vs2, size_t vl); +vuint16m2_t __riscv_vclz_tu (vuint16m2_t vd, vuint16m2_t vs2, size_t vl); +vuint16m4_t __riscv_vclz_tu (vuint16m4_t vd, vuint16m4_t vs2, size_t vl); +vuint16m8_t __riscv_vclz_tu (vuint16m8_t vd, vuint16m8_t vs2, size_t vl); +vuint32mf2_t __riscv_vclz_tu (vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vclz_tu (vuint32m1_t vd, vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vclz_tu (vuint32m2_t vd, vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vclz_tu (vuint32m4_t vd, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vclz_tu (vuint32m8_t vd, vuint32m8_t vs2, size_t vl); +vuint64m1_t __riscv_vclz_tu (vuint64m1_t vd, vuint64m1_t vs2, size_t vl); +vuint64m2_t __riscv_vclz_tu (vuint64m2_t vd, vuint64m2_t vs2, size_t vl); +vuint64m4_t __riscv_vclz_tu (vuint64m4_t vd, vuint64m4_t vs2, size_t vl); +vuint64m8_t __riscv_vclz_tu (vuint64m8_t vd, vuint64m8_t vs2, size_t vl); +vuint8mf8_t __riscv_vctz_tu (vuint8mf8_t vd, vuint8mf8_t vs2, size_t vl); +vuint8mf4_t __riscv_vctz_tu (vuint8mf4_t vd, vuint8mf4_t vs2, size_t vl); +vuint8mf2_t __riscv_vctz_tu (vuint8mf2_t vd, vuint8mf2_t vs2, size_t vl); +vuint8m1_t __riscv_vctz_tu (vuint8m1_t vd, vuint8m1_t vs2, size_t vl); +vuint8m2_t __riscv_vctz_tu (vuint8m2_t vd, vuint8m2_t vs2, size_t vl); +vuint8m4_t __riscv_vctz_tu (vuint8m4_t vd, vuint8m4_t vs2, size_t vl); +vuint8m8_t __riscv_vctz_tu (vuint8m8_t vd, vuint8m8_t vs2, size_t vl); +vuint16mf4_t __riscv_vctz_tu (vuint16mf4_t vd, vuint16mf4_t vs2, size_t vl); +vuint16mf2_t __riscv_vctz_tu (vuint16mf2_t vd, vuint16mf2_t vs2, size_t vl); +vuint16m1_t __riscv_vctz_tu (vuint16m1_t vd, vuint16m1_t vs2, size_t vl); +vuint16m2_t __riscv_vctz_tu (vuint16m2_t vd, vuint16m2_t vs2, size_t vl); +vuint16m4_t __riscv_vctz_tu (vuint16m4_t vd, vuint16m4_t vs2, size_t vl); +vuint16m8_t __riscv_vctz_tu (vuint16m8_t vd, vuint16m8_t vs2, size_t vl); +vuint32mf2_t __riscv_vctz_tu (vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vctz_tu (vuint32m1_t vd, vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vctz_tu (vuint32m2_t vd, vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vctz_tu (vuint32m4_t vd, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vctz_tu (vuint32m8_t vd, vuint32m8_t vs2, size_t vl); +vuint64m1_t __riscv_vctz_tu (vuint64m1_t vd, vuint64m1_t vs2, size_t vl); +vuint64m2_t __riscv_vctz_tu (vuint64m2_t vd, vuint64m2_t vs2, size_t vl); +vuint64m4_t __riscv_vctz_tu (vuint64m4_t vd, vuint64m4_t vs2, size_t vl); +vuint64m8_t __riscv_vctz_tu (vuint64m8_t vd, vuint64m8_t vs2, size_t vl); +// masked functions +vuint8mf8_t __riscv_vclz_tum (vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, size_t vl); +vuint8mf4_t __riscv_vclz_tum (vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, size_t vl); +vuint8mf2_t __riscv_vclz_tum (vbool16_t vm, 
vuint8mf2_t vd, vuint8mf2_t vs2, size_t vl); +vuint8m1_t __riscv_vclz_tum (vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, size_t vl); +vuint8m2_t __riscv_vclz_tum (vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, size_t vl); +vuint8m4_t __riscv_vclz_tum (vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, size_t vl); +vuint8m8_t __riscv_vclz_tum (vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, size_t vl); +vuint16mf4_t __riscv_vclz_tum (vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, size_t vl); +vuint16mf2_t __riscv_vclz_tum (vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, size_t vl); +vuint16m1_t __riscv_vclz_tum (vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, size_t vl); +vuint16m2_t __riscv_vclz_tum (vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, size_t vl); +vuint16m4_t __riscv_vclz_tum (vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, size_t vl); +vuint16m8_t __riscv_vclz_tum (vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, size_t vl); +vuint32mf2_t __riscv_vclz_tum (vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vclz_tum (vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vclz_tum (vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vclz_tum (vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vclz_tum (vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, size_t vl); +vuint64m1_t __riscv_vclz_tum (vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, size_t vl); +vuint64m2_t __riscv_vclz_tum (vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, size_t vl); +vuint64m4_t __riscv_vclz_tum (vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, size_t vl); +vuint64m8_t __riscv_vclz_tum (vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, size_t vl); +vuint8mf8_t __riscv_vctz_tum (vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, size_t vl); +vuint8mf4_t __riscv_vctz_tum (vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, size_t vl); +vuint8mf2_t __riscv_vctz_tum (vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, size_t vl); +vuint8m1_t __riscv_vctz_tum (vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, size_t vl); +vuint8m2_t __riscv_vctz_tum (vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, size_t vl); +vuint8m4_t __riscv_vctz_tum (vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, size_t vl); +vuint8m8_t __riscv_vctz_tum (vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, size_t vl); +vuint16mf4_t __riscv_vctz_tum (vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, size_t vl); +vuint16mf2_t __riscv_vctz_tum (vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, size_t vl); +vuint16m1_t __riscv_vctz_tum (vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, size_t vl); +vuint16m2_t __riscv_vctz_tum (vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, size_t vl); +vuint16m4_t __riscv_vctz_tum (vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, size_t vl); +vuint16m8_t __riscv_vctz_tum (vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, size_t vl); +vuint32mf2_t __riscv_vctz_tum (vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vctz_tum (vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vctz_tum (vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vctz_tum (vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vctz_tum (vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, size_t vl); +vuint64m1_t __riscv_vctz_tum (vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, size_t vl); +vuint64m2_t __riscv_vctz_tum (vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, size_t vl); +vuint64m4_t 
__riscv_vctz_tum (vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, size_t vl); +vuint64m8_t __riscv_vctz_tum (vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, size_t vl); +// masked functions +vuint8mf8_t __riscv_vclz_tumu (vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, size_t vl); +vuint8mf4_t __riscv_vclz_tumu (vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, size_t vl); +vuint8mf2_t __riscv_vclz_tumu (vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, size_t vl); +vuint8m1_t __riscv_vclz_tumu (vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, size_t vl); +vuint8m2_t __riscv_vclz_tumu (vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, size_t vl); +vuint8m4_t __riscv_vclz_tumu (vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, size_t vl); +vuint8m8_t __riscv_vclz_tumu (vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, size_t vl); +vuint16mf4_t __riscv_vclz_tumu (vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, size_t vl); +vuint16mf2_t __riscv_vclz_tumu (vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, size_t vl); +vuint16m1_t __riscv_vclz_tumu (vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, size_t vl); +vuint16m2_t __riscv_vclz_tumu (vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, size_t vl); +vuint16m4_t __riscv_vclz_tumu (vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, size_t vl); +vuint16m8_t __riscv_vclz_tumu (vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, size_t vl); +vuint32mf2_t __riscv_vclz_tumu (vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vclz_tumu (vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vclz_tumu (vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vclz_tumu (vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vclz_tumu (vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, size_t vl); +vuint64m1_t __riscv_vclz_tumu (vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, size_t vl); +vuint64m2_t __riscv_vclz_tumu (vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, size_t vl); +vuint64m4_t __riscv_vclz_tumu (vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, size_t vl); +vuint64m8_t __riscv_vclz_tumu (vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, size_t vl); +vuint8mf8_t __riscv_vctz_tumu (vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, size_t vl); +vuint8mf4_t __riscv_vctz_tumu (vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, size_t vl); +vuint8mf2_t __riscv_vctz_tumu (vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, size_t vl); +vuint8m1_t __riscv_vctz_tumu (vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, size_t vl); +vuint8m2_t __riscv_vctz_tumu (vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, size_t vl); +vuint8m4_t __riscv_vctz_tumu (vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, size_t vl); +vuint8m8_t __riscv_vctz_tumu (vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, size_t vl); +vuint16mf4_t __riscv_vctz_tumu (vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, size_t vl); +vuint16mf2_t __riscv_vctz_tumu (vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, size_t vl); +vuint16m1_t __riscv_vctz_tumu (vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, size_t vl); +vuint16m2_t __riscv_vctz_tumu (vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, size_t vl); +vuint16m4_t __riscv_vctz_tumu (vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, size_t vl); +vuint16m8_t __riscv_vctz_tumu (vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, size_t vl); +vuint32mf2_t __riscv_vctz_tumu (vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vctz_tumu (vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, size_t vl); +vuint32m2_t 
__riscv_vctz_tumu (vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vctz_tumu (vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vctz_tumu (vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, size_t vl); +vuint64m1_t __riscv_vctz_tumu (vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, size_t vl); +vuint64m2_t __riscv_vctz_tumu (vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, size_t vl); +vuint64m4_t __riscv_vctz_tumu (vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, size_t vl); +vuint64m8_t __riscv_vctz_tumu (vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, size_t vl); +// masked functions +vuint8mf8_t __riscv_vclz_mu (vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, size_t vl); +vuint8mf4_t __riscv_vclz_mu (vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, size_t vl); +vuint8mf2_t __riscv_vclz_mu (vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, size_t vl); +vuint8m1_t __riscv_vclz_mu (vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, size_t vl); +vuint8m2_t __riscv_vclz_mu (vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, size_t vl); +vuint8m4_t __riscv_vclz_mu (vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, size_t vl); +vuint8m8_t __riscv_vclz_mu (vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, size_t vl); +vuint16mf4_t __riscv_vclz_mu (vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, size_t vl); +vuint16mf2_t __riscv_vclz_mu (vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, size_t vl); +vuint16m1_t __riscv_vclz_mu (vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, size_t vl); +vuint16m2_t __riscv_vclz_mu (vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, size_t vl); +vuint16m4_t __riscv_vclz_mu (vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, size_t vl); +vuint16m8_t __riscv_vclz_mu (vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, size_t vl); +vuint32mf2_t __riscv_vclz_mu (vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vclz_mu (vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vclz_mu (vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vclz_mu (vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vclz_mu (vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, size_t vl); +vuint64m1_t __riscv_vclz_mu (vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, size_t vl); +vuint64m2_t __riscv_vclz_mu (vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, size_t vl); +vuint64m4_t __riscv_vclz_mu (vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, size_t vl); +vuint64m8_t __riscv_vclz_mu (vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, size_t vl); +vuint8mf8_t __riscv_vctz_mu (vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, size_t vl); +vuint8mf4_t __riscv_vctz_mu (vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, size_t vl); +vuint8mf2_t __riscv_vctz_mu (vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, size_t vl); +vuint8m1_t __riscv_vctz_mu (vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, size_t vl); +vuint8m2_t __riscv_vctz_mu (vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, size_t vl); +vuint8m4_t __riscv_vctz_mu (vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, size_t vl); +vuint8m8_t __riscv_vctz_mu (vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, size_t vl); +vuint16mf4_t __riscv_vctz_mu (vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, size_t vl); +vuint16mf2_t __riscv_vctz_mu (vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, size_t vl); +vuint16m1_t __riscv_vctz_mu (vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, size_t vl); +vuint16m2_t __riscv_vctz_mu (vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, size_t vl); 
+vuint16m4_t __riscv_vctz_mu (vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, size_t vl); +vuint16m8_t __riscv_vctz_mu (vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, size_t vl); +vuint32mf2_t __riscv_vctz_mu (vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vctz_mu (vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vctz_mu (vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vctz_mu (vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vctz_mu (vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, size_t vl); +vuint64m1_t __riscv_vctz_mu (vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, size_t vl); +vuint64m2_t __riscv_vctz_mu (vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, size_t vl); +vuint64m4_t __riscv_vctz_mu (vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, size_t vl); +vuint64m8_t __riscv_vctz_mu (vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, size_t vl); +---- + +[[policy-variant-overloaded]] +==== Vector Basic Bit-manipulation - Vector Population Count + +[,c] +---- +vuint8mf8_t __riscv_vcpop_tu (vuint8mf8_t vd, vuint8mf8_t vs2, size_t vl); +vuint8mf4_t __riscv_vcpop_tu (vuint8mf4_t vd, vuint8mf4_t vs2, size_t vl); +vuint8mf2_t __riscv_vcpop_tu (vuint8mf2_t vd, vuint8mf2_t vs2, size_t vl); +vuint8m1_t __riscv_vcpop_tu (vuint8m1_t vd, vuint8m1_t vs2, size_t vl); +vuint8m2_t __riscv_vcpop_tu (vuint8m2_t vd, vuint8m2_t vs2, size_t vl); +vuint8m4_t __riscv_vcpop_tu (vuint8m4_t vd, vuint8m4_t vs2, size_t vl); +vuint8m8_t __riscv_vcpop_tu (vuint8m8_t vd, vuint8m8_t vs2, size_t vl); +vuint16mf4_t __riscv_vcpop_tu (vuint16mf4_t vd, vuint16mf4_t vs2, size_t vl); +vuint16mf2_t __riscv_vcpop_tu (vuint16mf2_t vd, vuint16mf2_t vs2, size_t vl); +vuint16m1_t __riscv_vcpop_tu (vuint16m1_t vd, vuint16m1_t vs2, size_t vl); +vuint16m2_t __riscv_vcpop_tu (vuint16m2_t vd, vuint16m2_t vs2, size_t vl); +vuint16m4_t __riscv_vcpop_tu (vuint16m4_t vd, vuint16m4_t vs2, size_t vl); +vuint16m8_t __riscv_vcpop_tu (vuint16m8_t vd, vuint16m8_t vs2, size_t vl); +vuint32mf2_t __riscv_vcpop_tu (vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vcpop_tu (vuint32m1_t vd, vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vcpop_tu (vuint32m2_t vd, vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vcpop_tu (vuint32m4_t vd, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vcpop_tu (vuint32m8_t vd, vuint32m8_t vs2, size_t vl); +vuint64m1_t __riscv_vcpop_tu (vuint64m1_t vd, vuint64m1_t vs2, size_t vl); +vuint64m2_t __riscv_vcpop_tu (vuint64m2_t vd, vuint64m2_t vs2, size_t vl); +vuint64m4_t __riscv_vcpop_tu (vuint64m4_t vd, vuint64m4_t vs2, size_t vl); +vuint64m8_t __riscv_vcpop_tu (vuint64m8_t vd, vuint64m8_t vs2, size_t vl); +// masked functions +vuint8mf8_t __riscv_vcpop_tum (vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, size_t vl); +vuint8mf4_t __riscv_vcpop_tum (vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, size_t vl); +vuint8mf2_t __riscv_vcpop_tum (vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, size_t vl); +vuint8m1_t __riscv_vcpop_tum (vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, size_t vl); +vuint8m2_t __riscv_vcpop_tum (vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, size_t vl); +vuint8m4_t __riscv_vcpop_tum (vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, size_t vl); +vuint8m8_t __riscv_vcpop_tum (vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, size_t vl); +vuint16mf4_t __riscv_vcpop_tum (vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, size_t vl); +vuint16mf2_t __riscv_vcpop_tum (vbool32_t vm, 
vuint16mf2_t vd, vuint16mf2_t vs2, size_t vl); +vuint16m1_t __riscv_vcpop_tum (vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, size_t vl); +vuint16m2_t __riscv_vcpop_tum (vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, size_t vl); +vuint16m4_t __riscv_vcpop_tum (vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, size_t vl); +vuint16m8_t __riscv_vcpop_tum (vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, size_t vl); +vuint32mf2_t __riscv_vcpop_tum (vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vcpop_tum (vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vcpop_tum (vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vcpop_tum (vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vcpop_tum (vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, size_t vl); +vuint64m1_t __riscv_vcpop_tum (vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, size_t vl); +vuint64m2_t __riscv_vcpop_tum (vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, size_t vl); +vuint64m4_t __riscv_vcpop_tum (vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, size_t vl); +vuint64m8_t __riscv_vcpop_tum (vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, size_t vl); +// masked functions +vuint8mf8_t __riscv_vcpop_tumu (vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, size_t vl); +vuint8mf4_t __riscv_vcpop_tumu (vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, size_t vl); +vuint8mf2_t __riscv_vcpop_tumu (vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, size_t vl); +vuint8m1_t __riscv_vcpop_tumu (vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, size_t vl); +vuint8m2_t __riscv_vcpop_tumu (vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, size_t vl); +vuint8m4_t __riscv_vcpop_tumu (vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, size_t vl); +vuint8m8_t __riscv_vcpop_tumu (vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, size_t vl); +vuint16mf4_t __riscv_vcpop_tumu (vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, size_t vl); +vuint16mf2_t __riscv_vcpop_tumu (vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, size_t vl); +vuint16m1_t __riscv_vcpop_tumu (vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, size_t vl); +vuint16m2_t __riscv_vcpop_tumu (vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, size_t vl); +vuint16m4_t __riscv_vcpop_tumu (vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, size_t vl); +vuint16m8_t __riscv_vcpop_tumu (vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, size_t vl); +vuint32mf2_t __riscv_vcpop_tumu (vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vcpop_tumu (vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vcpop_tumu (vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vcpop_tumu (vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vcpop_tumu (vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, size_t vl); +vuint64m1_t __riscv_vcpop_tumu (vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, size_t vl); +vuint64m2_t __riscv_vcpop_tumu (vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, size_t vl); +vuint64m4_t __riscv_vcpop_tumu (vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, size_t vl); +vuint64m8_t __riscv_vcpop_tumu (vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, size_t vl); +// masked functions +vuint8mf8_t __riscv_vcpop_mu (vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, size_t vl); +vuint8mf4_t __riscv_vcpop_mu (vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, size_t vl); +vuint8mf2_t __riscv_vcpop_mu (vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, size_t vl); 
+vuint8m1_t __riscv_vcpop_mu (vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, size_t vl); +vuint8m2_t __riscv_vcpop_mu (vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, size_t vl); +vuint8m4_t __riscv_vcpop_mu (vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, size_t vl); +vuint8m8_t __riscv_vcpop_mu (vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, size_t vl); +vuint16mf4_t __riscv_vcpop_mu (vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, size_t vl); +vuint16mf2_t __riscv_vcpop_mu (vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, size_t vl); +vuint16m1_t __riscv_vcpop_mu (vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, size_t vl); +vuint16m2_t __riscv_vcpop_mu (vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, size_t vl); +vuint16m4_t __riscv_vcpop_mu (vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, size_t vl); +vuint16m8_t __riscv_vcpop_mu (vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, size_t vl); +vuint32mf2_t __riscv_vcpop_mu (vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vcpop_mu (vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vcpop_mu (vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vcpop_mu (vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vcpop_mu (vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, size_t vl); +vuint64m1_t __riscv_vcpop_mu (vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, size_t vl); +vuint64m2_t __riscv_vcpop_mu (vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, size_t vl); +vuint64m4_t __riscv_vcpop_mu (vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, size_t vl); +vuint64m8_t __riscv_vcpop_mu (vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, size_t vl); +---- [[policy-variant-overloaded]] ==== Vector Bit-manipulation used in Cryptography - Rotate diff --git a/auto-generated/vector-crypto/policy_funcs/overloaded_intrinsic_funcs/00_zvbb_-_vector_bit-manipulation_used_in_cryptography.adoc b/auto-generated/vector-crypto/policy_funcs/overloaded_intrinsic_funcs/00_zvbb_-_vector_bit-manipulation_used_in_cryptography.adoc index eef98e779..a4d961b88 100644 --- a/auto-generated/vector-crypto/policy_funcs/overloaded_intrinsic_funcs/00_zvbb_-_vector_bit-manipulation_used_in_cryptography.adoc +++ b/auto-generated/vector-crypto/policy_funcs/overloaded_intrinsic_funcs/00_zvbb_-_vector_bit-manipulation_used_in_cryptography.adoc @@ -463,7 +463,287 @@ vuint64m8_t __riscv_vrev8_mu (vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, size [[policy-variant-overloaded]] ==== Vector Basic Bit-manipulation - Count Bits -Intrinsics here don't have a policy variant. 
+ +[,c] +---- +vuint8mf8_t __riscv_vclz_tu (vuint8mf8_t vd, vuint8mf8_t vs2, size_t vl); +vuint8mf4_t __riscv_vclz_tu (vuint8mf4_t vd, vuint8mf4_t vs2, size_t vl); +vuint8mf2_t __riscv_vclz_tu (vuint8mf2_t vd, vuint8mf2_t vs2, size_t vl); +vuint8m1_t __riscv_vclz_tu (vuint8m1_t vd, vuint8m1_t vs2, size_t vl); +vuint8m2_t __riscv_vclz_tu (vuint8m2_t vd, vuint8m2_t vs2, size_t vl); +vuint8m4_t __riscv_vclz_tu (vuint8m4_t vd, vuint8m4_t vs2, size_t vl); +vuint8m8_t __riscv_vclz_tu (vuint8m8_t vd, vuint8m8_t vs2, size_t vl); +vuint16mf4_t __riscv_vclz_tu (vuint16mf4_t vd, vuint16mf4_t vs2, size_t vl); +vuint16mf2_t __riscv_vclz_tu (vuint16mf2_t vd, vuint16mf2_t vs2, size_t vl); +vuint16m1_t __riscv_vclz_tu (vuint16m1_t vd, vuint16m1_t vs2, size_t vl); +vuint16m2_t __riscv_vclz_tu (vuint16m2_t vd, vuint16m2_t vs2, size_t vl); +vuint16m4_t __riscv_vclz_tu (vuint16m4_t vd, vuint16m4_t vs2, size_t vl); +vuint16m8_t __riscv_vclz_tu (vuint16m8_t vd, vuint16m8_t vs2, size_t vl); +vuint32mf2_t __riscv_vclz_tu (vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vclz_tu (vuint32m1_t vd, vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vclz_tu (vuint32m2_t vd, vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vclz_tu (vuint32m4_t vd, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vclz_tu (vuint32m8_t vd, vuint32m8_t vs2, size_t vl); +vuint64m1_t __riscv_vclz_tu (vuint64m1_t vd, vuint64m1_t vs2, size_t vl); +vuint64m2_t __riscv_vclz_tu (vuint64m2_t vd, vuint64m2_t vs2, size_t vl); +vuint64m4_t __riscv_vclz_tu (vuint64m4_t vd, vuint64m4_t vs2, size_t vl); +vuint64m8_t __riscv_vclz_tu (vuint64m8_t vd, vuint64m8_t vs2, size_t vl); +vuint8mf8_t __riscv_vctz_tu (vuint8mf8_t vd, vuint8mf8_t vs2, size_t vl); +vuint8mf4_t __riscv_vctz_tu (vuint8mf4_t vd, vuint8mf4_t vs2, size_t vl); +vuint8mf2_t __riscv_vctz_tu (vuint8mf2_t vd, vuint8mf2_t vs2, size_t vl); +vuint8m1_t __riscv_vctz_tu (vuint8m1_t vd, vuint8m1_t vs2, size_t vl); +vuint8m2_t __riscv_vctz_tu (vuint8m2_t vd, vuint8m2_t vs2, size_t vl); +vuint8m4_t __riscv_vctz_tu (vuint8m4_t vd, vuint8m4_t vs2, size_t vl); +vuint8m8_t __riscv_vctz_tu (vuint8m8_t vd, vuint8m8_t vs2, size_t vl); +vuint16mf4_t __riscv_vctz_tu (vuint16mf4_t vd, vuint16mf4_t vs2, size_t vl); +vuint16mf2_t __riscv_vctz_tu (vuint16mf2_t vd, vuint16mf2_t vs2, size_t vl); +vuint16m1_t __riscv_vctz_tu (vuint16m1_t vd, vuint16m1_t vs2, size_t vl); +vuint16m2_t __riscv_vctz_tu (vuint16m2_t vd, vuint16m2_t vs2, size_t vl); +vuint16m4_t __riscv_vctz_tu (vuint16m4_t vd, vuint16m4_t vs2, size_t vl); +vuint16m8_t __riscv_vctz_tu (vuint16m8_t vd, vuint16m8_t vs2, size_t vl); +vuint32mf2_t __riscv_vctz_tu (vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vctz_tu (vuint32m1_t vd, vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vctz_tu (vuint32m2_t vd, vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vctz_tu (vuint32m4_t vd, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vctz_tu (vuint32m8_t vd, vuint32m8_t vs2, size_t vl); +vuint64m1_t __riscv_vctz_tu (vuint64m1_t vd, vuint64m1_t vs2, size_t vl); +vuint64m2_t __riscv_vctz_tu (vuint64m2_t vd, vuint64m2_t vs2, size_t vl); +vuint64m4_t __riscv_vctz_tu (vuint64m4_t vd, vuint64m4_t vs2, size_t vl); +vuint64m8_t __riscv_vctz_tu (vuint64m8_t vd, vuint64m8_t vs2, size_t vl); +// masked functions +vuint8mf8_t __riscv_vclz_tum (vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, size_t vl); +vuint8mf4_t __riscv_vclz_tum (vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, size_t vl); +vuint8mf2_t __riscv_vclz_tum (vbool16_t vm, 
vuint8mf2_t vd, vuint8mf2_t vs2, size_t vl); +vuint8m1_t __riscv_vclz_tum (vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, size_t vl); +vuint8m2_t __riscv_vclz_tum (vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, size_t vl); +vuint8m4_t __riscv_vclz_tum (vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, size_t vl); +vuint8m8_t __riscv_vclz_tum (vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, size_t vl); +vuint16mf4_t __riscv_vclz_tum (vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, size_t vl); +vuint16mf2_t __riscv_vclz_tum (vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, size_t vl); +vuint16m1_t __riscv_vclz_tum (vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, size_t vl); +vuint16m2_t __riscv_vclz_tum (vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, size_t vl); +vuint16m4_t __riscv_vclz_tum (vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, size_t vl); +vuint16m8_t __riscv_vclz_tum (vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, size_t vl); +vuint32mf2_t __riscv_vclz_tum (vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vclz_tum (vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vclz_tum (vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vclz_tum (vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vclz_tum (vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, size_t vl); +vuint64m1_t __riscv_vclz_tum (vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, size_t vl); +vuint64m2_t __riscv_vclz_tum (vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, size_t vl); +vuint64m4_t __riscv_vclz_tum (vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, size_t vl); +vuint64m8_t __riscv_vclz_tum (vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, size_t vl); +vuint8mf8_t __riscv_vctz_tum (vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, size_t vl); +vuint8mf4_t __riscv_vctz_tum (vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, size_t vl); +vuint8mf2_t __riscv_vctz_tum (vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, size_t vl); +vuint8m1_t __riscv_vctz_tum (vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, size_t vl); +vuint8m2_t __riscv_vctz_tum (vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, size_t vl); +vuint8m4_t __riscv_vctz_tum (vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, size_t vl); +vuint8m8_t __riscv_vctz_tum (vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, size_t vl); +vuint16mf4_t __riscv_vctz_tum (vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, size_t vl); +vuint16mf2_t __riscv_vctz_tum (vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, size_t vl); +vuint16m1_t __riscv_vctz_tum (vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, size_t vl); +vuint16m2_t __riscv_vctz_tum (vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, size_t vl); +vuint16m4_t __riscv_vctz_tum (vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, size_t vl); +vuint16m8_t __riscv_vctz_tum (vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, size_t vl); +vuint32mf2_t __riscv_vctz_tum (vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vctz_tum (vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vctz_tum (vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vctz_tum (vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vctz_tum (vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, size_t vl); +vuint64m1_t __riscv_vctz_tum (vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, size_t vl); +vuint64m2_t __riscv_vctz_tum (vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, size_t vl); +vuint64m4_t 
__riscv_vctz_tum (vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, size_t vl); +vuint64m8_t __riscv_vctz_tum (vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, size_t vl); +// masked functions +vuint8mf8_t __riscv_vclz_tumu (vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, size_t vl); +vuint8mf4_t __riscv_vclz_tumu (vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, size_t vl); +vuint8mf2_t __riscv_vclz_tumu (vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, size_t vl); +vuint8m1_t __riscv_vclz_tumu (vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, size_t vl); +vuint8m2_t __riscv_vclz_tumu (vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, size_t vl); +vuint8m4_t __riscv_vclz_tumu (vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, size_t vl); +vuint8m8_t __riscv_vclz_tumu (vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, size_t vl); +vuint16mf4_t __riscv_vclz_tumu (vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, size_t vl); +vuint16mf2_t __riscv_vclz_tumu (vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, size_t vl); +vuint16m1_t __riscv_vclz_tumu (vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, size_t vl); +vuint16m2_t __riscv_vclz_tumu (vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, size_t vl); +vuint16m4_t __riscv_vclz_tumu (vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, size_t vl); +vuint16m8_t __riscv_vclz_tumu (vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, size_t vl); +vuint32mf2_t __riscv_vclz_tumu (vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vclz_tumu (vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vclz_tumu (vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vclz_tumu (vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vclz_tumu (vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, size_t vl); +vuint64m1_t __riscv_vclz_tumu (vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, size_t vl); +vuint64m2_t __riscv_vclz_tumu (vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, size_t vl); +vuint64m4_t __riscv_vclz_tumu (vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, size_t vl); +vuint64m8_t __riscv_vclz_tumu (vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, size_t vl); +vuint8mf8_t __riscv_vctz_tumu (vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, size_t vl); +vuint8mf4_t __riscv_vctz_tumu (vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, size_t vl); +vuint8mf2_t __riscv_vctz_tumu (vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, size_t vl); +vuint8m1_t __riscv_vctz_tumu (vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, size_t vl); +vuint8m2_t __riscv_vctz_tumu (vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, size_t vl); +vuint8m4_t __riscv_vctz_tumu (vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, size_t vl); +vuint8m8_t __riscv_vctz_tumu (vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, size_t vl); +vuint16mf4_t __riscv_vctz_tumu (vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, size_t vl); +vuint16mf2_t __riscv_vctz_tumu (vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, size_t vl); +vuint16m1_t __riscv_vctz_tumu (vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, size_t vl); +vuint16m2_t __riscv_vctz_tumu (vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, size_t vl); +vuint16m4_t __riscv_vctz_tumu (vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, size_t vl); +vuint16m8_t __riscv_vctz_tumu (vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, size_t vl); +vuint32mf2_t __riscv_vctz_tumu (vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vctz_tumu (vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, size_t vl); +vuint32m2_t 
__riscv_vctz_tumu (vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vctz_tumu (vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vctz_tumu (vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, size_t vl); +vuint64m1_t __riscv_vctz_tumu (vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, size_t vl); +vuint64m2_t __riscv_vctz_tumu (vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, size_t vl); +vuint64m4_t __riscv_vctz_tumu (vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, size_t vl); +vuint64m8_t __riscv_vctz_tumu (vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, size_t vl); +// masked functions +vuint8mf8_t __riscv_vclz_mu (vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, size_t vl); +vuint8mf4_t __riscv_vclz_mu (vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, size_t vl); +vuint8mf2_t __riscv_vclz_mu (vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, size_t vl); +vuint8m1_t __riscv_vclz_mu (vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, size_t vl); +vuint8m2_t __riscv_vclz_mu (vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, size_t vl); +vuint8m4_t __riscv_vclz_mu (vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, size_t vl); +vuint8m8_t __riscv_vclz_mu (vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, size_t vl); +vuint16mf4_t __riscv_vclz_mu (vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, size_t vl); +vuint16mf2_t __riscv_vclz_mu (vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, size_t vl); +vuint16m1_t __riscv_vclz_mu (vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, size_t vl); +vuint16m2_t __riscv_vclz_mu (vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, size_t vl); +vuint16m4_t __riscv_vclz_mu (vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, size_t vl); +vuint16m8_t __riscv_vclz_mu (vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, size_t vl); +vuint32mf2_t __riscv_vclz_mu (vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vclz_mu (vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vclz_mu (vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vclz_mu (vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vclz_mu (vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, size_t vl); +vuint64m1_t __riscv_vclz_mu (vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, size_t vl); +vuint64m2_t __riscv_vclz_mu (vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, size_t vl); +vuint64m4_t __riscv_vclz_mu (vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, size_t vl); +vuint64m8_t __riscv_vclz_mu (vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, size_t vl); +vuint8mf8_t __riscv_vctz_mu (vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, size_t vl); +vuint8mf4_t __riscv_vctz_mu (vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, size_t vl); +vuint8mf2_t __riscv_vctz_mu (vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, size_t vl); +vuint8m1_t __riscv_vctz_mu (vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, size_t vl); +vuint8m2_t __riscv_vctz_mu (vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, size_t vl); +vuint8m4_t __riscv_vctz_mu (vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, size_t vl); +vuint8m8_t __riscv_vctz_mu (vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, size_t vl); +vuint16mf4_t __riscv_vctz_mu (vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, size_t vl); +vuint16mf2_t __riscv_vctz_mu (vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, size_t vl); +vuint16m1_t __riscv_vctz_mu (vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, size_t vl); +vuint16m2_t __riscv_vctz_mu (vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, size_t vl); 
+vuint16m4_t __riscv_vctz_mu (vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, size_t vl); +vuint16m8_t __riscv_vctz_mu (vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, size_t vl); +vuint32mf2_t __riscv_vctz_mu (vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vctz_mu (vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vctz_mu (vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vctz_mu (vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vctz_mu (vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, size_t vl); +vuint64m1_t __riscv_vctz_mu (vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, size_t vl); +vuint64m2_t __riscv_vctz_mu (vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, size_t vl); +vuint64m4_t __riscv_vctz_mu (vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, size_t vl); +vuint64m8_t __riscv_vctz_mu (vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, size_t vl); +---- + +[[policy-variant-overloaded]] +==== Vector Basic Bit-manipulation - Vector Population Count + +[,c] +---- +vuint8mf8_t __riscv_vcpop_tu (vuint8mf8_t vd, vuint8mf8_t vs2, size_t vl); +vuint8mf4_t __riscv_vcpop_tu (vuint8mf4_t vd, vuint8mf4_t vs2, size_t vl); +vuint8mf2_t __riscv_vcpop_tu (vuint8mf2_t vd, vuint8mf2_t vs2, size_t vl); +vuint8m1_t __riscv_vcpop_tu (vuint8m1_t vd, vuint8m1_t vs2, size_t vl); +vuint8m2_t __riscv_vcpop_tu (vuint8m2_t vd, vuint8m2_t vs2, size_t vl); +vuint8m4_t __riscv_vcpop_tu (vuint8m4_t vd, vuint8m4_t vs2, size_t vl); +vuint8m8_t __riscv_vcpop_tu (vuint8m8_t vd, vuint8m8_t vs2, size_t vl); +vuint16mf4_t __riscv_vcpop_tu (vuint16mf4_t vd, vuint16mf4_t vs2, size_t vl); +vuint16mf2_t __riscv_vcpop_tu (vuint16mf2_t vd, vuint16mf2_t vs2, size_t vl); +vuint16m1_t __riscv_vcpop_tu (vuint16m1_t vd, vuint16m1_t vs2, size_t vl); +vuint16m2_t __riscv_vcpop_tu (vuint16m2_t vd, vuint16m2_t vs2, size_t vl); +vuint16m4_t __riscv_vcpop_tu (vuint16m4_t vd, vuint16m4_t vs2, size_t vl); +vuint16m8_t __riscv_vcpop_tu (vuint16m8_t vd, vuint16m8_t vs2, size_t vl); +vuint32mf2_t __riscv_vcpop_tu (vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vcpop_tu (vuint32m1_t vd, vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vcpop_tu (vuint32m2_t vd, vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vcpop_tu (vuint32m4_t vd, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vcpop_tu (vuint32m8_t vd, vuint32m8_t vs2, size_t vl); +vuint64m1_t __riscv_vcpop_tu (vuint64m1_t vd, vuint64m1_t vs2, size_t vl); +vuint64m2_t __riscv_vcpop_tu (vuint64m2_t vd, vuint64m2_t vs2, size_t vl); +vuint64m4_t __riscv_vcpop_tu (vuint64m4_t vd, vuint64m4_t vs2, size_t vl); +vuint64m8_t __riscv_vcpop_tu (vuint64m8_t vd, vuint64m8_t vs2, size_t vl); +// masked functions +vuint8mf8_t __riscv_vcpop_tum (vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, size_t vl); +vuint8mf4_t __riscv_vcpop_tum (vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, size_t vl); +vuint8mf2_t __riscv_vcpop_tum (vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, size_t vl); +vuint8m1_t __riscv_vcpop_tum (vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, size_t vl); +vuint8m2_t __riscv_vcpop_tum (vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, size_t vl); +vuint8m4_t __riscv_vcpop_tum (vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, size_t vl); +vuint8m8_t __riscv_vcpop_tum (vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, size_t vl); +vuint16mf4_t __riscv_vcpop_tum (vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, size_t vl); +vuint16mf2_t __riscv_vcpop_tum (vbool32_t vm, 
vuint16mf2_t vd, vuint16mf2_t vs2, size_t vl); +vuint16m1_t __riscv_vcpop_tum (vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, size_t vl); +vuint16m2_t __riscv_vcpop_tum (vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, size_t vl); +vuint16m4_t __riscv_vcpop_tum (vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, size_t vl); +vuint16m8_t __riscv_vcpop_tum (vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, size_t vl); +vuint32mf2_t __riscv_vcpop_tum (vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vcpop_tum (vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vcpop_tum (vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vcpop_tum (vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vcpop_tum (vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, size_t vl); +vuint64m1_t __riscv_vcpop_tum (vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, size_t vl); +vuint64m2_t __riscv_vcpop_tum (vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, size_t vl); +vuint64m4_t __riscv_vcpop_tum (vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, size_t vl); +vuint64m8_t __riscv_vcpop_tum (vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, size_t vl); +// masked functions +vuint8mf8_t __riscv_vcpop_tumu (vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, size_t vl); +vuint8mf4_t __riscv_vcpop_tumu (vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, size_t vl); +vuint8mf2_t __riscv_vcpop_tumu (vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, size_t vl); +vuint8m1_t __riscv_vcpop_tumu (vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, size_t vl); +vuint8m2_t __riscv_vcpop_tumu (vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, size_t vl); +vuint8m4_t __riscv_vcpop_tumu (vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, size_t vl); +vuint8m8_t __riscv_vcpop_tumu (vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, size_t vl); +vuint16mf4_t __riscv_vcpop_tumu (vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, size_t vl); +vuint16mf2_t __riscv_vcpop_tumu (vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, size_t vl); +vuint16m1_t __riscv_vcpop_tumu (vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, size_t vl); +vuint16m2_t __riscv_vcpop_tumu (vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, size_t vl); +vuint16m4_t __riscv_vcpop_tumu (vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, size_t vl); +vuint16m8_t __riscv_vcpop_tumu (vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, size_t vl); +vuint32mf2_t __riscv_vcpop_tumu (vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vcpop_tumu (vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vcpop_tumu (vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vcpop_tumu (vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vcpop_tumu (vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, size_t vl); +vuint64m1_t __riscv_vcpop_tumu (vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, size_t vl); +vuint64m2_t __riscv_vcpop_tumu (vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, size_t vl); +vuint64m4_t __riscv_vcpop_tumu (vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, size_t vl); +vuint64m8_t __riscv_vcpop_tumu (vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, size_t vl); +// masked functions +vuint8mf8_t __riscv_vcpop_mu (vbool64_t vm, vuint8mf8_t vd, vuint8mf8_t vs2, size_t vl); +vuint8mf4_t __riscv_vcpop_mu (vbool32_t vm, vuint8mf4_t vd, vuint8mf4_t vs2, size_t vl); +vuint8mf2_t __riscv_vcpop_mu (vbool16_t vm, vuint8mf2_t vd, vuint8mf2_t vs2, size_t vl); 
+vuint8m1_t __riscv_vcpop_mu (vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2, size_t vl); +vuint8m2_t __riscv_vcpop_mu (vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2, size_t vl); +vuint8m4_t __riscv_vcpop_mu (vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2, size_t vl); +vuint8m8_t __riscv_vcpop_mu (vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2, size_t vl); +vuint16mf4_t __riscv_vcpop_mu (vbool64_t vm, vuint16mf4_t vd, vuint16mf4_t vs2, size_t vl); +vuint16mf2_t __riscv_vcpop_mu (vbool32_t vm, vuint16mf2_t vd, vuint16mf2_t vs2, size_t vl); +vuint16m1_t __riscv_vcpop_mu (vbool16_t vm, vuint16m1_t vd, vuint16m1_t vs2, size_t vl); +vuint16m2_t __riscv_vcpop_mu (vbool8_t vm, vuint16m2_t vd, vuint16m2_t vs2, size_t vl); +vuint16m4_t __riscv_vcpop_mu (vbool4_t vm, vuint16m4_t vd, vuint16m4_t vs2, size_t vl); +vuint16m8_t __riscv_vcpop_mu (vbool2_t vm, vuint16m8_t vd, vuint16m8_t vs2, size_t vl); +vuint32mf2_t __riscv_vcpop_mu (vbool64_t vm, vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl); +vuint32m1_t __riscv_vcpop_mu (vbool32_t vm, vuint32m1_t vd, vuint32m1_t vs2, size_t vl); +vuint32m2_t __riscv_vcpop_mu (vbool16_t vm, vuint32m2_t vd, vuint32m2_t vs2, size_t vl); +vuint32m4_t __riscv_vcpop_mu (vbool8_t vm, vuint32m4_t vd, vuint32m4_t vs2, size_t vl); +vuint32m8_t __riscv_vcpop_mu (vbool4_t vm, vuint32m8_t vd, vuint32m8_t vs2, size_t vl); +vuint64m1_t __riscv_vcpop_mu (vbool64_t vm, vuint64m1_t vd, vuint64m1_t vs2, size_t vl); +vuint64m2_t __riscv_vcpop_mu (vbool32_t vm, vuint64m2_t vd, vuint64m2_t vs2, size_t vl); +vuint64m4_t __riscv_vcpop_mu (vbool16_t vm, vuint64m4_t vd, vuint64m4_t vs2, size_t vl); +vuint64m8_t __riscv_vcpop_mu (vbool8_t vm, vuint64m8_t vd, vuint64m8_t vs2, size_t vl); +---- [[policy-variant-overloaded]] ==== Vector Bit-manipulation used in Cryptography - Rotate diff --git a/rvv-intrinsic-generator/rvv_intrinsic_gen/vector_crypto_inst.py b/rvv-intrinsic-generator/rvv_intrinsic_gen/vector_crypto_inst.py index 1877e46b9..7635912e1 100644 --- a/rvv-intrinsic-generator/rvv_intrinsic_gen/vector_crypto_inst.py +++ b/rvv-intrinsic-generator/rvv_intrinsic_gen/vector_crypto_inst.py @@ -40,7 +40,17 @@ def gen(g): UITYPE, SEWS, LMULS, - decorators.has_masking_no_maskedoff) + decorators.has_masking_maskedoff_policy) + + g.function_group( + vector_crypto_template, + "Vector Basic Bit-manipulation - Vector Population Count", + "", # FIXME: We probably have a separate document for vector-crypto + ["vcpop"], + UITYPE, + SEWS, + LMULS, + decorators.has_masking_maskedoff_policy) g.function_group( vector_crypto_template,